1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void tcomplain (void) ATTRIBUTE_NORETURN
;
60 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
62 static int return_zero (void);
64 static int return_one (void);
66 static int return_minus_one (void);
68 static void *return_null (void);
70 void target_ignore (void);
72 static void target_command (char *, int);
74 static struct target_ops
*find_default_run_target (char *);
76 static target_xfer_partial_ftype default_xfer_partial
;
78 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
81 static int find_default_can_async_p (struct target_ops
*ignore
);
83 static int find_default_is_async_p (struct target_ops
*ignore
);
85 #include "target-delegates.c"
87 static void init_dummy_target (void);
89 static struct target_ops debug_target
;
91 static void debug_to_open (char *, int);
93 static void debug_to_prepare_to_store (struct target_ops
*self
,
96 static void debug_to_files_info (struct target_ops
*);
98 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
99 struct bp_target_info
*);
101 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
102 struct bp_target_info
*);
104 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
107 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
109 struct bp_target_info
*);
111 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
113 struct bp_target_info
*);
115 static int debug_to_insert_watchpoint (struct target_ops
*self
,
117 struct expression
*);
119 static int debug_to_remove_watchpoint (struct target_ops
*self
,
121 struct expression
*);
123 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
125 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
126 CORE_ADDR
, CORE_ADDR
, int);
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
133 struct expression
*);
135 static void debug_to_terminal_init (struct target_ops
*self
);
137 static void debug_to_terminal_inferior (struct target_ops
*self
);
139 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
141 static void debug_to_terminal_save_ours (struct target_ops
*self
);
143 static void debug_to_terminal_ours (struct target_ops
*self
);
145 static void debug_to_load (struct target_ops
*self
, char *, int);
147 static int debug_to_can_run (struct target_ops
*self
);
149 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
154 struct target_ops
**target_structs
;
155 unsigned target_struct_size
;
156 unsigned target_struct_allocsize
;
157 #define DEFAULT_ALLOCSIZE 10
159 /* The initial current target, so that there is always a semi-valid
162 static struct target_ops dummy_target
;
164 /* Top of target stack. */
166 static struct target_ops
*target_stack
;
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
171 struct target_ops current_target
;
173 /* Command list for target. */
175 static struct cmd_list_element
*targetlist
= NULL
;
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
180 static int trust_readonly
= 0;
182 /* Nonzero if we should show true memory content including
183 memory breakpoint inserted by gdb. */
185 static int show_memory_breakpoints
= 0;
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertent disruption, such as in non-stop mode. */
191 int may_write_registers
= 1;
193 int may_write_memory
= 1;
195 int may_insert_breakpoints
= 1;
197 int may_insert_tracepoints
= 1;
199 int may_insert_fast_tracepoints
= 1;
203 /* Non-zero if we want to see trace of target level stuff. */
205 static unsigned int targetdebug
= 0;
207 show_targetdebug (struct ui_file
*file
, int from_tty
,
208 struct cmd_list_element
*c
, const char *value
)
210 fprintf_filtered (file
, _("Target debugging is %s.\n"), value
);
213 static void setup_target_debug (void);
215 /* The user just typed 'target' without the name of a target. */
218 target_command (char *arg
, int from_tty
)
220 fputs_filtered ("Argument required (target name). Try `help target'\n",
224 /* Default target_has_* methods for process_stratum targets. */
227 default_child_has_all_memory (struct target_ops
*ops
)
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid
, null_ptid
))
237 default_child_has_memory (struct target_ops
*ops
)
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid
, null_ptid
))
247 default_child_has_stack (struct target_ops
*ops
)
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid
, null_ptid
))
257 default_child_has_registers (struct target_ops
*ops
)
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid
, null_ptid
))
267 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
269 /* If there's no thread selected, then we can't make it run through
271 if (ptid_equal (the_ptid
, null_ptid
))
279 target_has_all_memory_1 (void)
281 struct target_ops
*t
;
283 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
284 if (t
->to_has_all_memory (t
))
291 target_has_memory_1 (void)
293 struct target_ops
*t
;
295 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
296 if (t
->to_has_memory (t
))
303 target_has_stack_1 (void)
305 struct target_ops
*t
;
307 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
308 if (t
->to_has_stack (t
))
315 target_has_registers_1 (void)
317 struct target_ops
*t
;
319 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
320 if (t
->to_has_registers (t
))
327 target_has_execution_1 (ptid_t the_ptid
)
329 struct target_ops
*t
;
331 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
332 if (t
->to_has_execution (t
, the_ptid
))
339 target_has_execution_current (void)
341 return target_has_execution_1 (inferior_ptid
);
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
348 complete_target_initialization (struct target_ops
*t
)
350 /* Provide default values for all "must have" methods. */
351 if (t
->to_xfer_partial
== NULL
)
352 t
->to_xfer_partial
= default_xfer_partial
;
354 if (t
->to_has_all_memory
== NULL
)
355 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
357 if (t
->to_has_memory
== NULL
)
358 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
360 if (t
->to_has_stack
== NULL
)
361 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
363 if (t
->to_has_registers
== NULL
)
364 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
366 if (t
->to_has_execution
== NULL
)
367 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
369 install_delegators (t
);
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
377 add_target_with_completer (struct target_ops
*t
,
378 completer_ftype
*completer
)
380 struct cmd_list_element
*c
;
382 complete_target_initialization (t
);
386 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
387 target_structs
= (struct target_ops
**) xmalloc
388 (target_struct_allocsize
* sizeof (*target_structs
));
390 if (target_struct_size
>= target_struct_allocsize
)
392 target_struct_allocsize
*= 2;
393 target_structs
= (struct target_ops
**)
394 xrealloc ((char *) target_structs
,
395 target_struct_allocsize
* sizeof (*target_structs
));
397 target_structs
[target_struct_size
++] = t
;
399 if (targetlist
== NULL
)
400 add_prefix_cmd ("target", class_run
, target_command
, _("\
401 Connect to a target machine or process.\n\
402 The first argument is the type or protocol of the target machine.\n\
403 Remaining arguments are interpreted by the target protocol. For more\n\
404 information on the arguments for a particular protocol, type\n\
405 `help target ' followed by the protocol name."),
406 &targetlist
, "target ", 0, &cmdlist
);
407 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
409 if (completer
!= NULL
)
410 set_cmd_completer (c
, completer
);
413 /* Add a possible target architecture to the list. */
416 add_target (struct target_ops
*t
)
418 add_target_with_completer (t
, NULL
);
424 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
426 struct cmd_list_element
*c
;
429 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
431 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
432 alt
= xstrprintf ("target %s", t
->to_shortname
);
433 deprecate_cmd (c
, alt
);
446 struct target_ops
*t
;
448 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
449 if (t
->to_kill
!= NULL
)
452 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
462 target_load (char *arg
, int from_tty
)
464 target_dcache_invalidate ();
465 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
469 target_create_inferior (char *exec_file
, char *args
,
470 char **env
, int from_tty
)
472 struct target_ops
*t
;
474 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
476 if (t
->to_create_inferior
!= NULL
)
478 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
480 fprintf_unfiltered (gdb_stdlog
,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file
, args
, from_tty
);
487 internal_error (__FILE__
, __LINE__
,
488 _("could not find a target to create inferior"));
492 target_terminal_inferior (void)
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution
)
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target
.to_terminal_inferior
) (¤t_target
);
507 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
508 struct target_ops
*t
)
510 errno
= EIO
; /* Can't read/write this location. */
511 return 0; /* No bytes handled. */
517 error (_("You can't do that when your target is `%s'"),
518 current_target
.to_shortname
);
524 error (_("You can't do that without a process to debug."));
528 default_terminal_info (struct target_ops
*self
, const char *args
, int from_tty
)
530 printf_unfiltered (_("No saved terminal information.\n"));
533 /* A default implementation for the to_get_ada_task_ptid target method.
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
540 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
542 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops
*self
)
548 if (!target_can_execute_reverse
)
550 else if (!target_can_async_p ())
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
570 update_current_target (void)
572 struct target_ops
*t
;
574 /* First, reset current's contents. */
575 memset (¤t_target
, 0, sizeof (current_target
));
577 /* Install the delegators. */
578 install_delegators (¤t_target
);
580 #define INHERIT(FIELD, TARGET) \
581 if (!current_target.FIELD) \
582 current_target.FIELD = (TARGET)->FIELD
584 for (t
= target_stack
; t
; t
= t
->beneath
)
586 INHERIT (to_shortname
, t
);
587 INHERIT (to_longname
, t
);
589 /* Do not inherit to_open. */
590 /* Do not inherit to_close. */
591 /* Do not inherit to_attach. */
592 INHERIT (to_post_attach
, t
);
593 INHERIT (to_attach_no_wait
, t
);
594 /* Do not inherit to_detach. */
595 /* Do not inherit to_disconnect. */
596 /* Do not inherit to_resume. */
597 /* Do not inherit to_wait. */
598 /* Do not inherit to_fetch_registers. */
599 /* Do not inherit to_store_registers. */
600 INHERIT (to_prepare_to_store
, t
);
601 INHERIT (deprecated_xfer_memory
, t
);
602 INHERIT (to_files_info
, t
);
603 /* Do not inherit to_insert_breakpoint. */
604 /* Do not inherit to_remove_breakpoint. */
605 INHERIT (to_can_use_hw_breakpoint
, t
);
606 INHERIT (to_insert_hw_breakpoint
, t
);
607 INHERIT (to_remove_hw_breakpoint
, t
);
608 /* Do not inherit to_ranged_break_num_registers. */
609 INHERIT (to_insert_watchpoint
, t
);
610 INHERIT (to_remove_watchpoint
, t
);
611 /* Do not inherit to_insert_mask_watchpoint. */
612 /* Do not inherit to_remove_mask_watchpoint. */
613 /* Do not inherit to_stopped_data_address. */
614 INHERIT (to_have_steppable_watchpoint
, t
);
615 INHERIT (to_have_continuable_watchpoint
, t
);
616 /* Do not inherit to_stopped_by_watchpoint. */
617 INHERIT (to_watchpoint_addr_within_range
, t
);
618 INHERIT (to_region_ok_for_hw_watchpoint
, t
);
619 INHERIT (to_can_accel_watchpoint_condition
, t
);
620 /* Do not inherit to_masked_watch_num_registers. */
621 INHERIT (to_terminal_init
, t
);
622 INHERIT (to_terminal_inferior
, t
);
623 INHERIT (to_terminal_ours_for_output
, t
);
624 INHERIT (to_terminal_ours
, t
);
625 INHERIT (to_terminal_save_ours
, t
);
626 INHERIT (to_terminal_info
, t
);
627 /* Do not inherit to_kill. */
628 INHERIT (to_load
, t
);
629 /* Do not inherit to_create_inferior. */
630 INHERIT (to_post_startup_inferior
, t
);
631 INHERIT (to_insert_fork_catchpoint
, t
);
632 INHERIT (to_remove_fork_catchpoint
, t
);
633 INHERIT (to_insert_vfork_catchpoint
, t
);
634 INHERIT (to_remove_vfork_catchpoint
, t
);
635 /* Do not inherit to_follow_fork. */
636 INHERIT (to_insert_exec_catchpoint
, t
);
637 INHERIT (to_remove_exec_catchpoint
, t
);
638 INHERIT (to_set_syscall_catchpoint
, t
);
639 INHERIT (to_has_exited
, t
);
640 /* Do not inherit to_mourn_inferior. */
641 INHERIT (to_can_run
, t
);
642 /* Do not inherit to_pass_signals. */
643 /* Do not inherit to_program_signals. */
644 /* Do not inherit to_thread_alive. */
645 /* Do not inherit to_find_new_threads. */
646 /* Do not inherit to_pid_to_str. */
647 INHERIT (to_extra_thread_info
, t
);
648 INHERIT (to_thread_name
, t
);
649 INHERIT (to_stop
, t
);
650 /* Do not inherit to_xfer_partial. */
651 INHERIT (to_rcmd
, t
);
652 INHERIT (to_pid_to_exec_file
, t
);
653 INHERIT (to_log_command
, t
);
654 INHERIT (to_stratum
, t
);
655 /* Do not inherit to_has_all_memory. */
656 /* Do not inherit to_has_memory. */
657 /* Do not inherit to_has_stack. */
658 /* Do not inherit to_has_registers. */
659 /* Do not inherit to_has_execution. */
660 INHERIT (to_has_thread_control
, t
);
661 /* Do not inherit to_can_async_p. */
662 /* Do not inherit to_is_async_p. */
663 /* Do not inherit to_async. */
664 INHERIT (to_find_memory_regions
, t
);
665 INHERIT (to_make_corefile_notes
, t
);
666 INHERIT (to_get_bookmark
, t
);
667 INHERIT (to_goto_bookmark
, t
);
668 /* Do not inherit to_get_thread_local_address. */
669 INHERIT (to_can_execute_reverse
, t
);
670 INHERIT (to_execution_direction
, t
);
671 INHERIT (to_thread_architecture
, t
);
672 /* Do not inherit to_read_description. */
673 INHERIT (to_get_ada_task_ptid
, t
);
674 /* Do not inherit to_search_memory. */
675 INHERIT (to_supports_multi_process
, t
);
676 INHERIT (to_supports_enable_disable_tracepoint
, t
);
677 INHERIT (to_supports_string_tracing
, t
);
678 INHERIT (to_trace_init
, t
);
679 INHERIT (to_download_tracepoint
, t
);
680 INHERIT (to_can_download_tracepoint
, t
);
681 INHERIT (to_download_trace_state_variable
, t
);
682 INHERIT (to_enable_tracepoint
, t
);
683 INHERIT (to_disable_tracepoint
, t
);
684 INHERIT (to_trace_set_readonly_regions
, t
);
685 INHERIT (to_trace_start
, t
);
686 INHERIT (to_get_trace_status
, t
);
687 INHERIT (to_get_tracepoint_status
, t
);
688 INHERIT (to_trace_stop
, t
);
689 INHERIT (to_trace_find
, t
);
690 INHERIT (to_get_trace_state_variable_value
, t
);
691 INHERIT (to_save_trace_data
, t
);
692 INHERIT (to_upload_tracepoints
, t
);
693 INHERIT (to_upload_trace_state_variables
, t
);
694 INHERIT (to_get_raw_trace_data
, t
);
695 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
696 INHERIT (to_set_disconnected_tracing
, t
);
697 INHERIT (to_set_circular_trace_buffer
, t
);
698 INHERIT (to_set_trace_buffer_size
, t
);
699 INHERIT (to_set_trace_notes
, t
);
700 INHERIT (to_get_tib_address
, t
);
701 INHERIT (to_set_permissions
, t
);
702 INHERIT (to_static_tracepoint_marker_at
, t
);
703 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
704 INHERIT (to_traceframe_info
, t
);
705 INHERIT (to_use_agent
, t
);
706 INHERIT (to_can_use_agent
, t
);
707 INHERIT (to_augmented_libraries_svr4_read
, t
);
708 INHERIT (to_magic
, t
);
709 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
710 INHERIT (to_can_run_breakpoint_commands
, t
);
711 /* Do not inherit to_memory_map. */
712 /* Do not inherit to_flash_erase. */
713 /* Do not inherit to_flash_done. */
717 /* Clean up a target struct so it no longer has any zero pointers in
718 it. Some entries are defaulted to a method that prints an error,
719 others are hard-wired to a standard recursive default. */
721 #define de_fault(field, value) \
722 if (!current_target.field) \
723 current_target.field = value
726 (void (*) (char *, int))
729 (void (*) (struct target_ops
*))
731 de_fault (to_post_attach
,
732 (void (*) (struct target_ops
*, int))
734 de_fault (to_prepare_to_store
,
735 (void (*) (struct target_ops
*, struct regcache
*))
737 de_fault (deprecated_xfer_memory
,
738 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
739 struct mem_attrib
*, struct target_ops
*))
741 de_fault (to_files_info
,
742 (void (*) (struct target_ops
*))
744 de_fault (to_can_use_hw_breakpoint
,
745 (int (*) (struct target_ops
*, int, int, int))
747 de_fault (to_insert_hw_breakpoint
,
748 (int (*) (struct target_ops
*, struct gdbarch
*,
749 struct bp_target_info
*))
751 de_fault (to_remove_hw_breakpoint
,
752 (int (*) (struct target_ops
*, struct gdbarch
*,
753 struct bp_target_info
*))
755 de_fault (to_insert_watchpoint
,
756 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
757 struct expression
*))
759 de_fault (to_remove_watchpoint
,
760 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
761 struct expression
*))
763 de_fault (to_watchpoint_addr_within_range
,
764 default_watchpoint_addr_within_range
);
765 de_fault (to_region_ok_for_hw_watchpoint
,
766 default_region_ok_for_hw_watchpoint
);
767 de_fault (to_can_accel_watchpoint_condition
,
768 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
769 struct expression
*))
771 de_fault (to_terminal_init
,
772 (void (*) (struct target_ops
*))
774 de_fault (to_terminal_inferior
,
775 (void (*) (struct target_ops
*))
777 de_fault (to_terminal_ours_for_output
,
778 (void (*) (struct target_ops
*))
780 de_fault (to_terminal_ours
,
781 (void (*) (struct target_ops
*))
783 de_fault (to_terminal_save_ours
,
784 (void (*) (struct target_ops
*))
786 de_fault (to_terminal_info
,
787 default_terminal_info
);
789 (void (*) (struct target_ops
*, char *, int))
791 de_fault (to_post_startup_inferior
,
792 (void (*) (struct target_ops
*, ptid_t
))
794 de_fault (to_insert_fork_catchpoint
,
795 (int (*) (struct target_ops
*, int))
797 de_fault (to_remove_fork_catchpoint
,
798 (int (*) (struct target_ops
*, int))
800 de_fault (to_insert_vfork_catchpoint
,
801 (int (*) (struct target_ops
*, int))
803 de_fault (to_remove_vfork_catchpoint
,
804 (int (*) (struct target_ops
*, int))
806 de_fault (to_insert_exec_catchpoint
,
807 (int (*) (struct target_ops
*, int))
809 de_fault (to_remove_exec_catchpoint
,
810 (int (*) (struct target_ops
*, int))
812 de_fault (to_set_syscall_catchpoint
,
813 (int (*) (struct target_ops
*, int, int, int, int, int *))
815 de_fault (to_has_exited
,
816 (int (*) (struct target_ops
*, int, int, int *))
818 de_fault (to_can_run
,
819 (int (*) (struct target_ops
*))
821 de_fault (to_extra_thread_info
,
822 (char *(*) (struct target_ops
*, struct thread_info
*))
824 de_fault (to_thread_name
,
825 (char *(*) (struct target_ops
*, struct thread_info
*))
828 (void (*) (struct target_ops
*, ptid_t
))
831 (void (*) (struct target_ops
*, char *, struct ui_file
*))
833 de_fault (to_pid_to_exec_file
,
834 (char *(*) (struct target_ops
*, int))
836 de_fault (to_thread_architecture
,
837 default_thread_architecture
);
838 current_target
.to_read_description
= NULL
;
839 de_fault (to_get_ada_task_ptid
,
840 (ptid_t (*) (struct target_ops
*, long, long))
841 default_get_ada_task_ptid
);
842 de_fault (to_supports_multi_process
,
843 (int (*) (struct target_ops
*))
845 de_fault (to_supports_enable_disable_tracepoint
,
846 (int (*) (struct target_ops
*))
848 de_fault (to_supports_string_tracing
,
849 (int (*) (struct target_ops
*))
851 de_fault (to_trace_init
,
852 (void (*) (struct target_ops
*))
854 de_fault (to_download_tracepoint
,
855 (void (*) (struct target_ops
*, struct bp_location
*))
857 de_fault (to_can_download_tracepoint
,
858 (int (*) (struct target_ops
*))
860 de_fault (to_download_trace_state_variable
,
861 (void (*) (struct target_ops
*, struct trace_state_variable
*))
863 de_fault (to_enable_tracepoint
,
864 (void (*) (struct bp_location
*))
866 de_fault (to_disable_tracepoint
,
867 (void (*) (struct bp_location
*))
869 de_fault (to_trace_set_readonly_regions
,
872 de_fault (to_trace_start
,
875 de_fault (to_get_trace_status
,
876 (int (*) (struct trace_status
*))
878 de_fault (to_get_tracepoint_status
,
879 (void (*) (struct breakpoint
*, struct uploaded_tp
*))
881 de_fault (to_trace_stop
,
884 de_fault (to_trace_find
,
885 (int (*) (enum trace_find_type
, int, CORE_ADDR
, CORE_ADDR
, int *))
887 de_fault (to_get_trace_state_variable_value
,
888 (int (*) (int, LONGEST
*))
890 de_fault (to_save_trace_data
,
891 (int (*) (const char *))
893 de_fault (to_upload_tracepoints
,
894 (int (*) (struct uploaded_tp
**))
896 de_fault (to_upload_trace_state_variables
,
897 (int (*) (struct uploaded_tsv
**))
899 de_fault (to_get_raw_trace_data
,
900 (LONGEST (*) (gdb_byte
*, ULONGEST
, LONGEST
))
902 de_fault (to_get_min_fast_tracepoint_insn_len
,
905 de_fault (to_set_disconnected_tracing
,
908 de_fault (to_set_circular_trace_buffer
,
911 de_fault (to_set_trace_buffer_size
,
914 de_fault (to_set_trace_notes
,
915 (int (*) (const char *, const char *, const char *))
917 de_fault (to_get_tib_address
,
918 (int (*) (ptid_t
, CORE_ADDR
*))
920 de_fault (to_set_permissions
,
923 de_fault (to_static_tracepoint_marker_at
,
924 (int (*) (CORE_ADDR
, struct static_tracepoint_marker
*))
926 de_fault (to_static_tracepoint_markers_by_strid
,
927 (VEC(static_tracepoint_marker_p
) * (*) (const char *))
929 de_fault (to_traceframe_info
,
930 (struct traceframe_info
* (*) (void))
932 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
933 (int (*) (struct target_ops
*))
935 de_fault (to_can_run_breakpoint_commands
,
936 (int (*) (struct target_ops
*))
938 de_fault (to_use_agent
,
941 de_fault (to_can_use_agent
,
944 de_fault (to_augmented_libraries_svr4_read
,
947 de_fault (to_execution_direction
, default_execution_direction
);
951 /* Finally, position the target-stack beneath the squashed
952 "current_target". That way code looking for a non-inherited
953 target method can quickly and simply find it. */
954 current_target
.beneath
= target_stack
;
957 setup_target_debug ();
960 /* Push a new target type into the stack of the existing target accessors,
961 possibly superseding some of the existing accessors.
963 Rather than allow an empty stack, we always have the dummy target at
964 the bottom stratum, so we can call the function vectors without
968 push_target (struct target_ops
*t
)
970 struct target_ops
**cur
;
972 /* Check magic number. If wrong, it probably means someone changed
973 the struct definition, but not all the places that initialize one. */
974 if (t
->to_magic
!= OPS_MAGIC
)
976 fprintf_unfiltered (gdb_stderr
,
977 "Magic number of %s target struct wrong\n",
979 internal_error (__FILE__
, __LINE__
,
980 _("failed internal consistency check"));
983 /* Find the proper stratum to install this target in. */
984 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
986 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
990 /* If there's already targets at this stratum, remove them. */
991 /* FIXME: cagney/2003-10-15: I think this should be popping all
992 targets to CUR, and not just those at this stratum level. */
993 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
995 /* There's already something at this stratum level. Close it,
996 and un-hook it from the stack. */
997 struct target_ops
*tmp
= (*cur
);
999 (*cur
) = (*cur
)->beneath
;
1000 tmp
->beneath
= NULL
;
1004 /* We have removed all targets in our stratum, now add the new one. */
1005 t
->beneath
= (*cur
);
1008 update_current_target ();
1011 /* Remove a target_ops vector from the stack, wherever it may be.
1012 Return how many times it was removed (0 or 1). */
1015 unpush_target (struct target_ops
*t
)
1017 struct target_ops
**cur
;
1018 struct target_ops
*tmp
;
1020 if (t
->to_stratum
== dummy_stratum
)
1021 internal_error (__FILE__
, __LINE__
,
1022 _("Attempt to unpush the dummy target"));
1024 /* Look for the specified target. Note that we assume that a target
1025 can only occur once in the target stack. */
1027 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1033 /* If we don't find target_ops, quit. Only open targets should be
1038 /* Unchain the target. */
1040 (*cur
) = (*cur
)->beneath
;
1041 tmp
->beneath
= NULL
;
1043 update_current_target ();
1045 /* Finally close the target. Note we do this after unchaining, so
1046 any target method calls from within the target_close
1047 implementation don't end up in T anymore. */
1054 pop_all_targets_above (enum strata above_stratum
)
1056 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
1058 if (!unpush_target (target_stack
))
1060 fprintf_unfiltered (gdb_stderr
,
1061 "pop_all_targets couldn't find target %s\n",
1062 target_stack
->to_shortname
);
1063 internal_error (__FILE__
, __LINE__
,
1064 _("failed internal consistency check"));
1071 pop_all_targets (void)
1073 pop_all_targets_above (dummy_stratum
);
1076 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1079 target_is_pushed (struct target_ops
*t
)
1081 struct target_ops
**cur
;
1083 /* Check magic number. If wrong, it probably means someone changed
1084 the struct definition, but not all the places that initialize one. */
1085 if (t
->to_magic
!= OPS_MAGIC
)
1087 fprintf_unfiltered (gdb_stderr
,
1088 "Magic number of %s target struct wrong\n",
1090 internal_error (__FILE__
, __LINE__
,
1091 _("failed internal consistency check"));
1094 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1101 /* Using the objfile specified in OBJFILE, find the address for the
1102 current thread's thread-local storage with offset OFFSET. */
1104 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1106 volatile CORE_ADDR addr
= 0;
1107 struct target_ops
*target
;
1109 for (target
= current_target
.beneath
;
1111 target
= target
->beneath
)
1113 if (target
->to_get_thread_local_address
!= NULL
)
1118 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1120 ptid_t ptid
= inferior_ptid
;
1121 volatile struct gdb_exception ex
;
1123 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1127 /* Fetch the load module address for this objfile. */
1128 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1130 /* If it's 0, throw the appropriate exception. */
1132 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1133 _("TLS load module not found"));
1135 addr
= target
->to_get_thread_local_address (target
, ptid
,
1138 /* If an error occurred, print TLS related messages here. Otherwise,
1139 throw the error to some higher catcher. */
1142 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1146 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1147 error (_("Cannot find thread-local variables "
1148 "in this thread library."));
1150 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1151 if (objfile_is_library
)
1152 error (_("Cannot find shared library `%s' in dynamic"
1153 " linker's load module list"), objfile_name (objfile
));
1155 error (_("Cannot find executable file `%s' in dynamic"
1156 " linker's load module list"), objfile_name (objfile
));
1158 case TLS_NOT_ALLOCATED_YET_ERROR
:
1159 if (objfile_is_library
)
1160 error (_("The inferior has not yet allocated storage for"
1161 " thread-local variables in\n"
1162 "the shared library `%s'\n"
1164 objfile_name (objfile
), target_pid_to_str (ptid
));
1166 error (_("The inferior has not yet allocated storage for"
1167 " thread-local variables in\n"
1168 "the executable `%s'\n"
1170 objfile_name (objfile
), target_pid_to_str (ptid
));
1172 case TLS_GENERIC_ERROR
:
1173 if (objfile_is_library
)
1174 error (_("Cannot find thread-local storage for %s, "
1175 "shared library %s:\n%s"),
1176 target_pid_to_str (ptid
),
1177 objfile_name (objfile
), ex
.message
);
1179 error (_("Cannot find thread-local storage for %s, "
1180 "executable file %s:\n%s"),
1181 target_pid_to_str (ptid
),
1182 objfile_name (objfile
), ex
.message
);
1185 throw_exception (ex
);
1190 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1191 TLS is an ABI-specific thing. But we don't do that yet. */
1193 error (_("Cannot find thread-local variables on this target"));
1199 target_xfer_status_to_string (enum target_xfer_status err
)
1201 #define CASE(X) case X: return #X
1204 CASE(TARGET_XFER_E_IO
);
1205 CASE(TARGET_XFER_E_UNAVAILABLE
);
1214 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1216 /* target_read_string -- read a null terminated string, up to LEN bytes,
1217 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1218 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1219 is responsible for freeing it. Return the number of bytes successfully
1223 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1225 int tlen
, offset
, i
;
1229 int buffer_allocated
;
1231 unsigned int nbytes_read
= 0;
1233 gdb_assert (string
);
1235 /* Small for testing. */
1236 buffer_allocated
= 4;
1237 buffer
= xmalloc (buffer_allocated
);
1242 tlen
= MIN (len
, 4 - (memaddr
& 3));
1243 offset
= memaddr
& 3;
1245 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1248 /* The transfer request might have crossed the boundary to an
1249 unallocated region of memory. Retry the transfer, requesting
1253 errcode
= target_read_memory (memaddr
, buf
, 1);
1258 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1262 bytes
= bufptr
- buffer
;
1263 buffer_allocated
*= 2;
1264 buffer
= xrealloc (buffer
, buffer_allocated
);
1265 bufptr
= buffer
+ bytes
;
1268 for (i
= 0; i
< tlen
; i
++)
1270 *bufptr
++ = buf
[i
+ offset
];
1271 if (buf
[i
+ offset
] == '\000')
1273 nbytes_read
+= i
+ 1;
1280 nbytes_read
+= tlen
;
1289 struct target_section_table
*
1290 target_get_section_table (struct target_ops
*target
)
1292 struct target_ops
*t
;
1295 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1297 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1298 if (t
->to_get_section_table
!= NULL
)
1299 return (*t
->to_get_section_table
) (t
);
1304 /* Find a section containing ADDR. */
1306 struct target_section
*
1307 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1309 struct target_section_table
*table
= target_get_section_table (target
);
1310 struct target_section
*secp
;
1315 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1317 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1323 /* Read memory from the live target, even if currently inspecting a
1324 traceframe. The return is the same as that of target_read. */
1326 static enum target_xfer_status
1327 target_read_live_memory (enum target_object object
,
1328 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1329 ULONGEST
*xfered_len
)
1331 enum target_xfer_status ret
;
1332 struct cleanup
*cleanup
;
1334 /* Switch momentarily out of tfind mode so to access live memory.
1335 Note that this must not clear global state, such as the frame
1336 cache, which must still remain valid for the previous traceframe.
1337 We may be _building_ the frame cache at this point. */
1338 cleanup
= make_cleanup_restore_traceframe_number ();
1339 set_traceframe_number (-1);
1341 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1342 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1344 do_cleanups (cleanup
);
1348 /* Using the set of read-only target sections of OPS, read live
1349 read-only memory. Note that the actual reads start from the
1350 top-most target again.
1352 For interface/parameters/return description see target.h,
1355 static enum target_xfer_status
1356 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1357 enum target_object object
,
1358 gdb_byte
*readbuf
, ULONGEST memaddr
,
1359 ULONGEST len
, ULONGEST
*xfered_len
)
1361 struct target_section
*secp
;
1362 struct target_section_table
*table
;
1364 secp
= target_section_by_addr (ops
, memaddr
);
1366 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1367 secp
->the_bfd_section
)
1370 struct target_section
*p
;
1371 ULONGEST memend
= memaddr
+ len
;
1373 table
= target_get_section_table (ops
);
1375 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1377 if (memaddr
>= p
->addr
)
1379 if (memend
<= p
->endaddr
)
1381 /* Entire transfer is within this section. */
1382 return target_read_live_memory (object
, memaddr
,
1383 readbuf
, len
, xfered_len
);
1385 else if (memaddr
>= p
->endaddr
)
1387 /* This section ends before the transfer starts. */
1392 /* This section overlaps the transfer. Just do half. */
1393 len
= p
->endaddr
- memaddr
;
1394 return target_read_live_memory (object
, memaddr
,
1395 readbuf
, len
, xfered_len
);
1401 return TARGET_XFER_EOF
;
1404 /* Read memory from more than one valid target. A core file, for
1405 instance, could have some of memory but delegate other bits to
1406 the target below it. So, we must manually try all targets. */
1408 static enum target_xfer_status
1409 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1410 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1411 ULONGEST
*xfered_len
)
1413 enum target_xfer_status res
;
1417 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1418 readbuf
, writebuf
, memaddr
, len
,
1420 if (res
== TARGET_XFER_OK
)
1423 /* Stop if the target reports that the memory is not available. */
1424 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1427 /* We want to continue past core files to executables, but not
1428 past a running target's memory. */
1429 if (ops
->to_has_all_memory (ops
))
1434 while (ops
!= NULL
);
1439 /* Perform a partial memory transfer.
1440 For docs see target.h, to_xfer_partial. */
1442 static enum target_xfer_status
1443 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1444 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1445 ULONGEST len
, ULONGEST
*xfered_len
)
1447 enum target_xfer_status res
;
1449 struct mem_region
*region
;
1450 struct inferior
*inf
;
1452 /* For accesses to unmapped overlay sections, read directly from
1453 files. Must do this first, as MEMADDR may need adjustment. */
1454 if (readbuf
!= NULL
&& overlay_debugging
)
1456 struct obj_section
*section
= find_pc_overlay (memaddr
);
1458 if (pc_in_unmapped_range (memaddr
, section
))
1460 struct target_section_table
*table
1461 = target_get_section_table (ops
);
1462 const char *section_name
= section
->the_bfd_section
->name
;
1464 memaddr
= overlay_mapped_address (memaddr
, section
);
1465 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1466 memaddr
, len
, xfered_len
,
1468 table
->sections_end
,
1473 /* Try the executable files, if "trust-readonly-sections" is set. */
1474 if (readbuf
!= NULL
&& trust_readonly
)
1476 struct target_section
*secp
;
1477 struct target_section_table
*table
;
1479 secp
= target_section_by_addr (ops
, memaddr
);
1481 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1482 secp
->the_bfd_section
)
1485 table
= target_get_section_table (ops
);
1486 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1487 memaddr
, len
, xfered_len
,
1489 table
->sections_end
,
1494 /* If reading unavailable memory in the context of traceframes, and
1495 this address falls within a read-only section, fallback to
1496 reading from live memory. */
1497 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1499 VEC(mem_range_s
) *available
;
1501 /* If we fail to get the set of available memory, then the
1502 target does not support querying traceframe info, and so we
1503 attempt reading from the traceframe anyway (assuming the
1504 target implements the old QTro packet then). */
1505 if (traceframe_available_memory (&available
, memaddr
, len
))
1507 struct cleanup
*old_chain
;
1509 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1511 if (VEC_empty (mem_range_s
, available
)
1512 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1514 /* Don't read into the traceframe's available
1516 if (!VEC_empty (mem_range_s
, available
))
1518 LONGEST oldlen
= len
;
1520 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1521 gdb_assert (len
<= oldlen
);
1524 do_cleanups (old_chain
);
1526 /* This goes through the topmost target again. */
1527 res
= memory_xfer_live_readonly_partial (ops
, object
,
1530 if (res
== TARGET_XFER_OK
)
1531 return TARGET_XFER_OK
;
1534 /* No use trying further, we know some memory starting
1535 at MEMADDR isn't available. */
1537 return TARGET_XFER_E_UNAVAILABLE
;
1541 /* Don't try to read more than how much is available, in
1542 case the target implements the deprecated QTro packet to
1543 cater for older GDBs (the target's knowledge of read-only
1544 sections may be outdated by now). */
1545 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1547 do_cleanups (old_chain
);
1551 /* Try GDB's internal data cache. */
1552 region
= lookup_mem_region (memaddr
);
1553 /* region->hi == 0 means there's no upper bound. */
1554 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1557 reg_len
= region
->hi
- memaddr
;
1559 switch (region
->attrib
.mode
)
1562 if (writebuf
!= NULL
)
1563 return TARGET_XFER_E_IO
;
1567 if (readbuf
!= NULL
)
1568 return TARGET_XFER_E_IO
;
1572 /* We only support writing to flash during "load" for now. */
1573 if (writebuf
!= NULL
)
1574 error (_("Writing to flash memory forbidden in this context"));
1578 return TARGET_XFER_E_IO
;
1581 if (!ptid_equal (inferior_ptid
, null_ptid
))
1582 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1587 /* The dcache reads whole cache lines; that doesn't play well
1588 with reading from a trace buffer, because reading outside of
1589 the collected memory range fails. */
1590 && get_traceframe_number () == -1
1591 && (region
->attrib
.cache
1592 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1593 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1595 DCACHE
*dcache
= target_dcache_get_or_init ();
1598 if (readbuf
!= NULL
)
1599 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1601 /* FIXME drow/2006-08-09: If we're going to preserve const
1602 correctness dcache_xfer_memory should take readbuf and
1604 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1607 return TARGET_XFER_E_IO
;
1610 *xfered_len
= (ULONGEST
) l
;
1611 return TARGET_XFER_OK
;
1615 /* If none of those methods found the memory we wanted, fall back
1616 to a target partial transfer. Normally a single call to
1617 to_xfer_partial is enough; if it doesn't recognize an object
1618 it will call the to_xfer_partial of the next target down.
1619 But for memory this won't do. Memory is the only target
1620 object which can be read from more than one valid target.
1621 A core file, for instance, could have some of memory but
1622 delegate other bits to the target below it. So, we must
1623 manually try all targets. */
1625 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1628 /* Make sure the cache gets updated no matter what - if we are writing
1629 to the stack. Even if this write is not tagged as such, we still need
1630 to update the cache. */
1632 if (res
== TARGET_XFER_OK
1635 && target_dcache_init_p ()
1636 && !region
->attrib
.cache
1637 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1638 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1640 DCACHE
*dcache
= target_dcache_get ();
1642 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1645 /* If we still haven't got anything, return the last error. We
1650 /* Perform a partial memory transfer. For docs see target.h,
1653 static enum target_xfer_status
1654 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1655 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1656 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1658 enum target_xfer_status res
;
1660 /* Zero length requests are ok and require no work. */
1662 return TARGET_XFER_EOF
;
1664 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1665 breakpoint insns, thus hiding out from higher layers whether
1666 there are software breakpoints inserted in the code stream. */
1667 if (readbuf
!= NULL
)
1669 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1672 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1673 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1678 struct cleanup
*old_chain
;
1680 /* A large write request is likely to be partially satisfied
1681 by memory_xfer_partial_1. We will continually malloc
1682 and free a copy of the entire write request for breakpoint
1683 shadow handling even though we only end up writing a small
1684 subset of it. Cap writes to 4KB to mitigate this. */
1685 len
= min (4096, len
);
1687 buf
= xmalloc (len
);
1688 old_chain
= make_cleanup (xfree
, buf
);
1689 memcpy (buf
, writebuf
, len
);
1691 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1692 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1695 do_cleanups (old_chain
);
1702 restore_show_memory_breakpoints (void *arg
)
1704 show_memory_breakpoints
= (uintptr_t) arg
;
1708 make_show_memory_breakpoints_cleanup (int show
)
1710 int current
= show_memory_breakpoints
;
1712 show_memory_breakpoints
= show
;
1713 return make_cleanup (restore_show_memory_breakpoints
,
1714 (void *) (uintptr_t) current
);
1717 /* For docs see target.h, to_xfer_partial. */
1719 enum target_xfer_status
1720 target_xfer_partial (struct target_ops
*ops
,
1721 enum target_object object
, const char *annex
,
1722 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1723 ULONGEST offset
, ULONGEST len
,
1724 ULONGEST
*xfered_len
)
1726 enum target_xfer_status retval
;
1728 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1730 /* Transfer is done when LEN is zero. */
1732 return TARGET_XFER_EOF
;
1734 if (writebuf
&& !may_write_memory
)
1735 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1736 core_addr_to_string_nz (offset
), plongest (len
));
1740 /* If this is a memory transfer, let the memory-specific code
1741 have a look at it instead. Memory transfers are more
1743 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1744 || object
== TARGET_OBJECT_CODE_MEMORY
)
1745 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1746 writebuf
, offset
, len
, xfered_len
);
1747 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1749 /* Request the normal memory object from other layers. */
1750 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1754 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1755 writebuf
, offset
, len
, xfered_len
);
1759 const unsigned char *myaddr
= NULL
;
1761 fprintf_unfiltered (gdb_stdlog
,
1762 "%s:target_xfer_partial "
1763 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1766 (annex
? annex
: "(null)"),
1767 host_address_to_string (readbuf
),
1768 host_address_to_string (writebuf
),
1769 core_addr_to_string_nz (offset
),
1770 pulongest (len
), retval
,
1771 pulongest (*xfered_len
));
1777 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1781 fputs_unfiltered (", bytes =", gdb_stdlog
);
1782 for (i
= 0; i
< *xfered_len
; i
++)
1784 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1786 if (targetdebug
< 2 && i
> 0)
1788 fprintf_unfiltered (gdb_stdlog
, " ...");
1791 fprintf_unfiltered (gdb_stdlog
, "\n");
1794 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1798 fputc_unfiltered ('\n', gdb_stdlog
);
1801 /* Check implementations of to_xfer_partial update *XFERED_LEN
1802 properly. Do assertion after printing debug messages, so that we
1803 can find more clues on assertion failure from debugging messages. */
1804 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1805 gdb_assert (*xfered_len
> 0);
1810 /* Read LEN bytes of target memory at address MEMADDR, placing the
1811 results in GDB's memory at MYADDR. Returns either 0 for success or
1812 TARGET_XFER_E_IO if any error occurs.
1814 If an error occurs, no guarantee is made about the contents of the data at
1815 MYADDR. In particular, the caller should not depend upon partial reads
1816 filling the buffer with good data. There is no way for the caller to know
1817 how much good data might have been transfered anyway. Callers that can
1818 deal with partial reads should call target_read (which will retry until
1819 it makes no progress, and then return how much was transferred). */
1822 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1824 /* Dispatch to the topmost target, not the flattened current_target.
1825 Memory accesses check target->to_has_(all_)memory, and the
1826 flattened target doesn't inherit those. */
1827 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1828 myaddr
, memaddr
, len
) == len
)
1831 return TARGET_XFER_E_IO
;
1834 /* Like target_read_memory, but specify explicitly that this is a read
1835 from the target's raw memory. That is, this read bypasses the
1836 dcache, breakpoint shadowing, etc. */
1839 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1841 /* See comment in target_read_memory about why the request starts at
1842 current_target.beneath. */
1843 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1844 myaddr
, memaddr
, len
) == len
)
1847 return TARGET_XFER_E_IO
;
1850 /* Like target_read_memory, but specify explicitly that this is a read from
1851 the target's stack. This may trigger different cache behavior. */
1854 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1856 /* See comment in target_read_memory about why the request starts at
1857 current_target.beneath. */
1858 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1859 myaddr
, memaddr
, len
) == len
)
1862 return TARGET_XFER_E_IO
;
1865 /* Like target_read_memory, but specify explicitly that this is a read from
1866 the target's code. This may trigger different cache behavior. */
1869 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1871 /* See comment in target_read_memory about why the request starts at
1872 current_target.beneath. */
1873 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1874 myaddr
, memaddr
, len
) == len
)
1877 return TARGET_XFER_E_IO
;
1880 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1881 Returns either 0 for success or TARGET_XFER_E_IO if any
1882 error occurs. If an error occurs, no guarantee is made about how
1883 much data got written. Callers that can deal with partial writes
1884 should call target_write. */
1887 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1889 /* See comment in target_read_memory about why the request starts at
1890 current_target.beneath. */
1891 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1892 myaddr
, memaddr
, len
) == len
)
1895 return TARGET_XFER_E_IO
;
1898 /* Write LEN bytes from MYADDR to target raw memory at address
1899 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1900 if any error occurs. If an error occurs, no guarantee is made
1901 about how much data got written. Callers that can deal with
1902 partial writes should call target_write. */
1905 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1907 /* See comment in target_read_memory about why the request starts at
1908 current_target.beneath. */
1909 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1910 myaddr
, memaddr
, len
) == len
)
1913 return TARGET_XFER_E_IO
;
1916 /* Fetch the target's memory map. */
1919 target_memory_map (void)
1921 VEC(mem_region_s
) *result
;
1922 struct mem_region
*last_one
, *this_one
;
1924 struct target_ops
*t
;
1927 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1929 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1930 if (t
->to_memory_map
!= NULL
)
1936 result
= t
->to_memory_map (t
);
1940 qsort (VEC_address (mem_region_s
, result
),
1941 VEC_length (mem_region_s
, result
),
1942 sizeof (struct mem_region
), mem_region_cmp
);
1944 /* Check that regions do not overlap. Simultaneously assign
1945 a numbering for the "mem" commands to use to refer to
1948 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1950 this_one
->number
= ix
;
1952 if (last_one
&& last_one
->hi
> this_one
->lo
)
1954 warning (_("Overlapping regions in memory map: ignoring"));
1955 VEC_free (mem_region_s
, result
);
1958 last_one
= this_one
;
1965 target_flash_erase (ULONGEST address
, LONGEST length
)
1967 struct target_ops
*t
;
1969 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1970 if (t
->to_flash_erase
!= NULL
)
1973 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1974 hex_string (address
), phex (length
, 0));
1975 t
->to_flash_erase (t
, address
, length
);
1983 target_flash_done (void)
1985 struct target_ops
*t
;
1987 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1988 if (t
->to_flash_done
!= NULL
)
1991 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1992 t
->to_flash_done (t
);
/* "show trust-readonly-sections" command callback.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2010 static enum target_xfer_status
2011 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
2012 const char *annex
, gdb_byte
*readbuf
,
2013 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
2014 ULONGEST
*xfered_len
)
2016 if (object
== TARGET_OBJECT_MEMORY
2017 && ops
->deprecated_xfer_memory
!= NULL
)
2018 /* If available, fall back to the target's
2019 "deprecated_xfer_memory" method. */
2024 if (writebuf
!= NULL
)
2026 void *buffer
= xmalloc (len
);
2027 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
2029 memcpy (buffer
, writebuf
, len
);
2030 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
2031 1/*write*/, NULL
, ops
);
2032 do_cleanups (cleanup
);
2034 if (readbuf
!= NULL
)
2035 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
2036 0/*read*/, NULL
, ops
);
2039 *xfered_len
= (ULONGEST
) xfered
;
2040 return TARGET_XFER_E_IO
;
2042 else if (xfered
== 0 && errno
== 0)
2043 /* "deprecated_xfer_memory" uses 0, cross checked against
2044 ERRNO as one indication of an error. */
2045 return TARGET_XFER_EOF
;
2047 return TARGET_XFER_E_IO
;
2051 gdb_assert (ops
->beneath
!= NULL
);
2052 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
2053 readbuf
, writebuf
, offset
, len
,
2058 /* Target vector read/write partial wrapper functions. */
2060 static enum target_xfer_status
2061 target_read_partial (struct target_ops
*ops
,
2062 enum target_object object
,
2063 const char *annex
, gdb_byte
*buf
,
2064 ULONGEST offset
, ULONGEST len
,
2065 ULONGEST
*xfered_len
)
2067 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
2071 static enum target_xfer_status
2072 target_write_partial (struct target_ops
*ops
,
2073 enum target_object object
,
2074 const char *annex
, const gdb_byte
*buf
,
2075 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
2077 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
2081 /* Wrappers to perform the full transfer. */
2083 /* For docs on target_read see target.h. */
2086 target_read (struct target_ops
*ops
,
2087 enum target_object object
,
2088 const char *annex
, gdb_byte
*buf
,
2089 ULONGEST offset
, LONGEST len
)
2093 while (xfered
< len
)
2095 ULONGEST xfered_len
;
2096 enum target_xfer_status status
;
2098 status
= target_read_partial (ops
, object
, annex
,
2099 (gdb_byte
*) buf
+ xfered
,
2100 offset
+ xfered
, len
- xfered
,
2103 /* Call an observer, notifying them of the xfer progress? */
2104 if (status
== TARGET_XFER_EOF
)
2106 else if (status
== TARGET_XFER_OK
)
2108 xfered
+= xfered_len
;
2118 /* Assuming that the entire [begin, end) range of memory cannot be
2119 read, try to read whatever subrange is possible to read.
2121 The function returns, in RESULT, either zero or one memory block.
2122 If there's a readable subrange at the beginning, it is completely
2123 read and returned. Any further readable subrange will not be read.
2124 Otherwise, if there's a readable subrange at the end, it will be
2125 completely read and returned. Any readable subranges before it
2126 (obviously, not starting at the beginning), will be ignored. In
2127 other cases -- either no readable subrange, or readable subrange(s)
2128 that is neither at the beginning, or end, nothing is returned.
2130 The purpose of this function is to handle a read across a boundary
2131 of accessible memory in a case when memory map is not available.
2132 The above restrictions are fine for this case, but will give
2133 incorrect results if the memory is 'patchy'. However, supporting
2134 'patchy' memory would require trying to read every single byte,
2135 and it seems unacceptable solution. Explicit memory map is
2136 recommended for this case -- and target_read_memory_robust will
2137 take care of reading multiple ranges then. */
2140 read_whatever_is_readable (struct target_ops
*ops
,
2141 ULONGEST begin
, ULONGEST end
,
2142 VEC(memory_read_result_s
) **result
)
2144 gdb_byte
*buf
= xmalloc (end
- begin
);
2145 ULONGEST current_begin
= begin
;
2146 ULONGEST current_end
= end
;
2148 memory_read_result_s r
;
2149 ULONGEST xfered_len
;
2151 /* If we previously failed to read 1 byte, nothing can be done here. */
2152 if (end
- begin
<= 1)
2158 /* Check that either first or the last byte is readable, and give up
2159 if not. This heuristic is meant to permit reading accessible memory
2160 at the boundary of accessible region. */
2161 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2162 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2167 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2168 buf
+ (end
-begin
) - 1, end
- 1, 1,
2169 &xfered_len
) == TARGET_XFER_OK
)
2180 /* Loop invariant is that the [current_begin, current_end) was previously
2181 found to be not readable as a whole.
2183 Note loop condition -- if the range has 1 byte, we can't divide the range
2184 so there's no point trying further. */
2185 while (current_end
- current_begin
> 1)
2187 ULONGEST first_half_begin
, first_half_end
;
2188 ULONGEST second_half_begin
, second_half_end
;
2190 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2194 first_half_begin
= current_begin
;
2195 first_half_end
= middle
;
2196 second_half_begin
= middle
;
2197 second_half_end
= current_end
;
2201 first_half_begin
= middle
;
2202 first_half_end
= current_end
;
2203 second_half_begin
= current_begin
;
2204 second_half_end
= middle
;
2207 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2208 buf
+ (first_half_begin
- begin
),
2210 first_half_end
- first_half_begin
);
2212 if (xfer
== first_half_end
- first_half_begin
)
2214 /* This half reads up fine. So, the error must be in the
2216 current_begin
= second_half_begin
;
2217 current_end
= second_half_end
;
2221 /* This half is not readable. Because we've tried one byte, we
2222 know some part of this half if actually redable. Go to the next
2223 iteration to divide again and try to read.
2225 We don't handle the other half, because this function only tries
2226 to read a single readable subrange. */
2227 current_begin
= first_half_begin
;
2228 current_end
= first_half_end
;
2234 /* The [begin, current_begin) range has been read. */
2236 r
.end
= current_begin
;
2241 /* The [current_end, end) range has been read. */
2242 LONGEST rlen
= end
- current_end
;
2244 r
.data
= xmalloc (rlen
);
2245 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2246 r
.begin
= current_end
;
2250 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2254 free_memory_read_result_vector (void *x
)
2256 VEC(memory_read_result_s
) *v
= x
;
2257 memory_read_result_s
*current
;
2260 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2262 xfree (current
->data
);
2264 VEC_free (memory_read_result_s
, v
);
2267 VEC(memory_read_result_s
) *
2268 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2270 VEC(memory_read_result_s
) *result
= 0;
2273 while (xfered
< len
)
2275 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2278 /* If there is no explicit region, a fake one should be created. */
2279 gdb_assert (region
);
2281 if (region
->hi
== 0)
2282 rlen
= len
- xfered
;
2284 rlen
= region
->hi
- offset
;
2286 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2288 /* Cannot read this region. Note that we can end up here only
2289 if the region is explicitly marked inaccessible, or
2290 'inaccessible-by-default' is in effect. */
2295 LONGEST to_read
= min (len
- xfered
, rlen
);
2296 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2298 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2299 (gdb_byte
*) buffer
,
2300 offset
+ xfered
, to_read
);
2301 /* Call an observer, notifying them of the xfer progress? */
2304 /* Got an error reading full chunk. See if maybe we can read
2307 read_whatever_is_readable (ops
, offset
+ xfered
,
2308 offset
+ xfered
+ to_read
, &result
);
2313 struct memory_read_result r
;
2315 r
.begin
= offset
+ xfered
;
2316 r
.end
= r
.begin
+ xfer
;
2317 VEC_safe_push (memory_read_result_s
, result
, &r
);
2327 /* An alternative to target_write with progress callbacks. */
2330 target_write_with_progress (struct target_ops
*ops
,
2331 enum target_object object
,
2332 const char *annex
, const gdb_byte
*buf
,
2333 ULONGEST offset
, LONGEST len
,
2334 void (*progress
) (ULONGEST
, void *), void *baton
)
2338 /* Give the progress callback a chance to set up. */
2340 (*progress
) (0, baton
);
2342 while (xfered
< len
)
2344 ULONGEST xfered_len
;
2345 enum target_xfer_status status
;
2347 status
= target_write_partial (ops
, object
, annex
,
2348 (gdb_byte
*) buf
+ xfered
,
2349 offset
+ xfered
, len
- xfered
,
2352 if (status
== TARGET_XFER_EOF
)
2354 if (TARGET_XFER_STATUS_ERROR_P (status
))
2357 gdb_assert (status
== TARGET_XFER_OK
);
2359 (*progress
) (xfered_len
, baton
);
2361 xfered
+= xfered_len
;
2367 /* For docs on target_write see target.h. */
2370 target_write (struct target_ops
*ops
,
2371 enum target_object object
,
2372 const char *annex
, const gdb_byte
*buf
,
2373 ULONGEST offset
, LONGEST len
)
2375 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2379 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2380 the size of the transferred data. PADDING additional bytes are
2381 available in *BUF_P. This is a helper function for
2382 target_read_alloc; see the declaration of that function for more
2386 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2387 const char *annex
, gdb_byte
**buf_p
, int padding
)
2389 size_t buf_alloc
, buf_pos
;
2392 /* This function does not have a length parameter; it reads the
2393 entire OBJECT). Also, it doesn't support objects fetched partly
2394 from one target and partly from another (in a different stratum,
2395 e.g. a core file and an executable). Both reasons make it
2396 unsuitable for reading memory. */
2397 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2399 /* Start by reading up to 4K at a time. The target will throttle
2400 this number down if necessary. */
2402 buf
= xmalloc (buf_alloc
);
2406 ULONGEST xfered_len
;
2407 enum target_xfer_status status
;
2409 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2410 buf_pos
, buf_alloc
- buf_pos
- padding
,
2413 if (status
== TARGET_XFER_EOF
)
2415 /* Read all there was. */
2422 else if (status
!= TARGET_XFER_OK
)
2424 /* An error occurred. */
2426 return TARGET_XFER_E_IO
;
2429 buf_pos
+= xfered_len
;
2431 /* If the buffer is filling up, expand it. */
2432 if (buf_alloc
< buf_pos
* 2)
2435 buf
= xrealloc (buf
, buf_alloc
);
2442 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2443 the size of the transferred data. See the declaration in "target.h"
2444 function for more information about the return value. */
2447 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2448 const char *annex
, gdb_byte
**buf_p
)
2450 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2453 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2454 returned as a string, allocated using xmalloc. If an error occurs
2455 or the transfer is unsupported, NULL is returned. Empty objects
2456 are returned as allocated but empty strings. A warning is issued
2457 if the result contains any embedded NUL bytes. */
2460 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2465 LONGEST i
, transferred
;
2467 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2468 bufstr
= (char *) buffer
;
2470 if (transferred
< 0)
2473 if (transferred
== 0)
2474 return xstrdup ("");
2476 bufstr
[transferred
] = 0;
2478 /* Check for embedded NUL bytes; but allow trailing NULs. */
2479 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2482 warning (_("target object %d, annex %s, "
2483 "contained unexpected null characters"),
2484 (int) object
, annex
? annex
: "(none)");
2491 /* Memory transfer methods. */
2494 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2497 /* This method is used to read from an alternate, non-current
2498 target. This read must bypass the overlay support (as symbols
2499 don't match this target), and GDB's internal cache (wrong cache
2500 for this target). */
2501 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2503 memory_error (TARGET_XFER_E_IO
, addr
);
2507 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2508 int len
, enum bfd_endian byte_order
)
2510 gdb_byte buf
[sizeof (ULONGEST
)];
2512 gdb_assert (len
<= sizeof (buf
));
2513 get_target_memory (ops
, addr
, buf
, len
);
2514 return extract_unsigned_integer (buf
, len
, byte_order
);
2520 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2521 struct bp_target_info
*bp_tgt
)
2523 if (!may_insert_breakpoints
)
2525 warning (_("May not insert breakpoints"));
2529 return current_target
.to_insert_breakpoint (¤t_target
,
2536 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2537 struct bp_target_info
*bp_tgt
)
2539 /* This is kind of a weird case to handle, but the permission might
2540 have been changed after breakpoints were inserted - in which case
2541 we should just take the user literally and assume that any
2542 breakpoints should be left in place. */
2543 if (!may_insert_breakpoints
)
2545 warning (_("May not remove breakpoints"));
2549 return current_target
.to_remove_breakpoint (¤t_target
,
2554 target_info (char *args
, int from_tty
)
2556 struct target_ops
*t
;
2557 int has_all_mem
= 0;
2559 if (symfile_objfile
!= NULL
)
2560 printf_unfiltered (_("Symbols from \"%s\".\n"),
2561 objfile_name (symfile_objfile
));
2563 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2565 if (!(*t
->to_has_memory
) (t
))
2568 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2571 printf_unfiltered (_("\tWhile running this, "
2572 "GDB does not access memory from...\n"));
2573 printf_unfiltered ("%s:\n", t
->to_longname
);
2574 (t
->to_files_info
) (t
);
2575 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2579 /* This function is called before any new inferior is created, e.g.
2580 by running a program, attaching, or connecting to a target.
2581 It cleans up any state from previous invocations which might
2582 change between runs. This is a subset of what target_preopen
2583 resets (things which might change between targets). */
2586 target_pre_inferior (int from_tty
)
2588 /* Clear out solib state. Otherwise the solib state of the previous
2589 inferior might have survived and is entirely wrong for the new
2590 target. This has been observed on GNU/Linux using glibc 2.3. How
2602 Cannot access memory at address 0xdeadbeef
2605 /* In some OSs, the shared library list is the same/global/shared
2606 across inferiors. If code is shared between processes, so are
2607 memory regions and features. */
2608 if (!gdbarch_has_global_solist (target_gdbarch ()))
2610 no_shared_libraries (NULL
, from_tty
);
2612 invalidate_target_mem_regions ();
2614 target_clear_description ();
2617 agent_capability_invalidate ();
2620 /* Callback for iterate_over_inferiors. Gets rid of the given
2624 dispose_inferior (struct inferior
*inf
, void *args
)
2626 struct thread_info
*thread
;
2628 thread
= any_thread_of_process (inf
->pid
);
2631 switch_to_thread (thread
->ptid
);
2633 /* Core inferiors actually should be detached, not killed. */
2634 if (target_has_execution
)
2637 target_detach (NULL
, 0);
2643 /* This is to be called by the open routine before it does
2647 target_preopen (int from_tty
)
2651 if (have_inferiors ())
2654 || !have_live_inferiors ()
2655 || query (_("A program is being debugged already. Kill it? ")))
2656 iterate_over_inferiors (dispose_inferior
, NULL
);
2658 error (_("Program not killed."));
2661 /* Calling target_kill may remove the target from the stack. But if
2662 it doesn't (which seems like a win for UDI), remove it now. */
2663 /* Leave the exec target, though. The user may be switching from a
2664 live process to a core of the same program. */
2665 pop_all_targets_above (file_stratum
);
2667 target_pre_inferior (from_tty
);
2670 /* Detach a target after doing deferred register stores. */
2673 target_detach (const char *args
, int from_tty
)
2675 struct target_ops
* t
;
2677 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2678 /* Don't remove global breakpoints here. They're removed on
2679 disconnection from the target. */
2682 /* If we're in breakpoints-always-inserted mode, have to remove
2683 them before detaching. */
2684 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2686 prepare_for_detach ();
2688 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2690 if (t
->to_detach
!= NULL
)
2692 t
->to_detach (t
, args
, from_tty
);
2694 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2700 internal_error (__FILE__
, __LINE__
, _("could not find a target to detach"));
2704 target_disconnect (char *args
, int from_tty
)
2706 struct target_ops
*t
;
2708 /* If we're in breakpoints-always-inserted mode or if breakpoints
2709 are global across processes, we have to remove them before
2711 remove_breakpoints ();
2713 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2714 if (t
->to_disconnect
!= NULL
)
2717 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2719 t
->to_disconnect (t
, args
, from_tty
);
2727 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2729 struct target_ops
*t
;
2730 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2735 char *status_string
;
2736 char *options_string
;
2738 status_string
= target_waitstatus_to_string (status
);
2739 options_string
= target_options_to_string (options
);
2740 fprintf_unfiltered (gdb_stdlog
,
2741 "target_wait (%d, status, options={%s})"
2743 ptid_get_pid (ptid
), options_string
,
2744 ptid_get_pid (retval
), status_string
);
2745 xfree (status_string
);
2746 xfree (options_string
);
2753 target_pid_to_str (ptid_t ptid
)
2755 struct target_ops
*t
;
2757 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2759 if (t
->to_pid_to_str
!= NULL
)
2760 return (*t
->to_pid_to_str
) (t
, ptid
);
2763 return normal_pid_to_str (ptid
);
2767 target_thread_name (struct thread_info
*info
)
2769 struct target_ops
*t
;
2771 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2773 if (t
->to_thread_name
!= NULL
)
2774 return (*t
->to_thread_name
) (t
, info
);
2781 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2783 struct target_ops
*t
;
2785 target_dcache_invalidate ();
2787 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2789 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2790 ptid_get_pid (ptid
),
2791 step
? "step" : "continue",
2792 gdb_signal_to_name (signal
));
2794 registers_changed_ptid (ptid
);
2795 set_executing (ptid
, 1);
2796 set_running (ptid
, 1);
2797 clear_inline_frame_state (ptid
);
2801 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2803 struct target_ops
*t
;
2805 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2807 if (t
->to_pass_signals
!= NULL
)
2813 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2816 for (i
= 0; i
< numsigs
; i
++)
2817 if (pass_signals
[i
])
2818 fprintf_unfiltered (gdb_stdlog
, " %s",
2819 gdb_signal_to_name (i
));
2821 fprintf_unfiltered (gdb_stdlog
, " })\n");
2824 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2831 target_program_signals (int numsigs
, unsigned char *program_signals
)
2833 struct target_ops
*t
;
2835 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2837 if (t
->to_program_signals
!= NULL
)
2843 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2846 for (i
= 0; i
< numsigs
; i
++)
2847 if (program_signals
[i
])
2848 fprintf_unfiltered (gdb_stdlog
, " %s",
2849 gdb_signal_to_name (i
));
2851 fprintf_unfiltered (gdb_stdlog
, " })\n");
2854 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2860 /* Look through the list of possible targets for a target that can
2864 target_follow_fork (int follow_child
, int detach_fork
)
2866 struct target_ops
*t
;
2868 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2870 if (t
->to_follow_fork
!= NULL
)
2872 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2875 fprintf_unfiltered (gdb_stdlog
,
2876 "target_follow_fork (%d, %d) = %d\n",
2877 follow_child
, detach_fork
, retval
);
2882 /* Some target returned a fork event, but did not know how to follow it. */
2883 internal_error (__FILE__
, __LINE__
,
2884 _("could not find a target to follow fork"));
2888 target_mourn_inferior (void)
2890 struct target_ops
*t
;
2892 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2894 if (t
->to_mourn_inferior
!= NULL
)
2896 t
->to_mourn_inferior (t
);
2898 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2900 /* We no longer need to keep handles on any of the object files.
2901 Make sure to release them to avoid unnecessarily locking any
2902 of them while we're not actually debugging. */
2903 bfd_cache_close_all ();
2909 internal_error (__FILE__
, __LINE__
,
2910 _("could not find a target to follow mourn inferior"));
2913 /* Look for a target which can describe architectural features, starting
2914 from TARGET. If we find one, return its description. */
2916 const struct target_desc
*
2917 target_read_description (struct target_ops
*target
)
2919 struct target_ops
*t
;
2921 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2922 if (t
->to_read_description
!= NULL
)
2924 const struct target_desc
*tdesc
;
2926 tdesc
= t
->to_read_description (t
);
2934 /* The default implementation of to_search_memory.
2935 This implements a basic search of memory, reading target memory and
2936 performing the search here (as opposed to performing the search in on the
2937 target side with, for example, gdbserver). */
2940 simple_search_memory (struct target_ops
*ops
,
2941 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2942 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2943 CORE_ADDR
*found_addrp
)
2945 /* NOTE: also defined in find.c testcase. */
2946 #define SEARCH_CHUNK_SIZE 16000
2947 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2948 /* Buffer to hold memory contents for searching. */
2949 gdb_byte
*search_buf
;
2950 unsigned search_buf_size
;
2951 struct cleanup
*old_cleanups
;
2953 search_buf_size
= chunk_size
+ pattern_len
- 1;
2955 /* No point in trying to allocate a buffer larger than the search space. */
2956 if (search_space_len
< search_buf_size
)
2957 search_buf_size
= search_space_len
;
2959 search_buf
= malloc (search_buf_size
);
2960 if (search_buf
== NULL
)
2961 error (_("Unable to allocate memory to perform the search."));
2962 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2964 /* Prime the search buffer. */
2966 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2967 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2969 warning (_("Unable to access %s bytes of target "
2970 "memory at %s, halting search."),
2971 pulongest (search_buf_size
), hex_string (start_addr
));
2972 do_cleanups (old_cleanups
);
2976 /* Perform the search.
2978 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2979 When we've scanned N bytes we copy the trailing bytes to the start and
2980 read in another N bytes. */
2982 while (search_space_len
>= pattern_len
)
2984 gdb_byte
*found_ptr
;
2985 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2987 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2988 pattern
, pattern_len
);
2990 if (found_ptr
!= NULL
)
2992 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2994 *found_addrp
= found_addr
;
2995 do_cleanups (old_cleanups
);
2999 /* Not found in this chunk, skip to next chunk. */
3001 /* Don't let search_space_len wrap here, it's unsigned. */
3002 if (search_space_len
>= chunk_size
)
3003 search_space_len
-= chunk_size
;
3005 search_space_len
= 0;
3007 if (search_space_len
>= pattern_len
)
3009 unsigned keep_len
= search_buf_size
- chunk_size
;
3010 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
3013 /* Copy the trailing part of the previous iteration to the front
3014 of the buffer for the next iteration. */
3015 gdb_assert (keep_len
== pattern_len
- 1);
3016 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
3018 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
3020 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
3021 search_buf
+ keep_len
, read_addr
,
3022 nr_to_read
) != nr_to_read
)
3024 warning (_("Unable to access %s bytes of target "
3025 "memory at %s, halting search."),
3026 plongest (nr_to_read
),
3027 hex_string (read_addr
));
3028 do_cleanups (old_cleanups
);
3032 start_addr
+= chunk_size
;
3038 do_cleanups (old_cleanups
);
3042 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3043 sequence of bytes in PATTERN with length PATTERN_LEN.
3045 The result is 1 if found, 0 if not found, and -1 if there was an error
3046 requiring halting of the search (e.g. memory read error).
3047 If the pattern is found the address is recorded in FOUND_ADDRP. */
3050 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
3051 const gdb_byte
*pattern
, ULONGEST pattern_len
,
3052 CORE_ADDR
*found_addrp
)
3054 struct target_ops
*t
;
3057 /* We don't use INHERIT to set current_target.to_search_memory,
3058 so we have to scan the target stack and handle targetdebug
3062 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
3063 hex_string (start_addr
));
3065 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3066 if (t
->to_search_memory
!= NULL
)
3071 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
3072 pattern
, pattern_len
, found_addrp
);
3076 /* If a special version of to_search_memory isn't available, use the
3078 found
= simple_search_memory (current_target
.beneath
,
3079 start_addr
, search_space_len
,
3080 pattern
, pattern_len
, found_addrp
);
3084 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
3089 /* Look through the currently pushed targets. If none of them will
3090 be able to restart the currently running process, issue an error
3094 target_require_runnable (void)
3096 struct target_ops
*t
;
3098 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
3100 /* If this target knows how to create a new program, then
3101 assume we will still be able to after killing the current
3102 one. Either killing and mourning will not pop T, or else
3103 find_default_run_target will find it again. */
3104 if (t
->to_create_inferior
!= NULL
)
3107 /* Do not worry about thread_stratum targets that can not
3108 create inferiors. Assume they will be pushed again if
3109 necessary, and continue to the process_stratum. */
3110 if (t
->to_stratum
== thread_stratum
3111 || t
->to_stratum
== arch_stratum
)
3114 error (_("The \"%s\" target does not support \"run\". "
3115 "Try \"help target\" or \"continue\"."),
3119 /* This function is only called if the target is running. In that
3120 case there should have been a process_stratum target and it
3121 should either know how to create inferiors, or not... */
3122 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3125 /* Look through the list of possible targets for a target that can
3126 execute a run or attach command without any other data. This is
3127 used to locate the default process stratum.
3129 If DO_MESG is not NULL, the result is always valid (error() is
3130 called for errors); else, return NULL on error. */
3132 static struct target_ops
*
3133 find_default_run_target (char *do_mesg
)
3135 struct target_ops
**t
;
3136 struct target_ops
*runable
= NULL
;
3141 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3144 if ((*t
)->to_can_run
&& target_can_run (*t
))
3154 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3163 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3165 struct target_ops
*t
;
3167 t
= find_default_run_target ("attach");
3168 (t
->to_attach
) (t
, args
, from_tty
);
3173 find_default_create_inferior (struct target_ops
*ops
,
3174 char *exec_file
, char *allargs
, char **env
,
3177 struct target_ops
*t
;
3179 t
= find_default_run_target ("run");
3180 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3185 find_default_can_async_p (struct target_ops
*ignore
)
3187 struct target_ops
*t
;
3189 /* This may be called before the target is pushed on the stack;
3190 look for the default process stratum. If there's none, gdb isn't
3191 configured with a native debugger, and target remote isn't
3193 t
= find_default_run_target (NULL
);
3194 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3195 return (t
->to_can_async_p
) (t
);
3200 find_default_is_async_p (struct target_ops
*ignore
)
3202 struct target_ops
*t
;
3204 /* This may be called before the target is pushed on the stack;
3205 look for the default process stratum. If there's none, gdb isn't
3206 configured with a native debugger, and target remote isn't
3208 t
= find_default_run_target (NULL
);
3209 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3210 return (t
->to_is_async_p
) (t
);
3215 find_default_supports_non_stop (struct target_ops
*self
)
3217 struct target_ops
*t
;
3219 t
= find_default_run_target (NULL
);
3220 if (t
&& t
->to_supports_non_stop
)
3221 return (t
->to_supports_non_stop
) (t
);
3226 target_supports_non_stop (void)
3228 struct target_ops
*t
;
3230 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3231 if (t
->to_supports_non_stop
)
3232 return t
->to_supports_non_stop (t
);
3237 /* Implement the "info proc" command. */
3240 target_info_proc (char *args
, enum info_proc_what what
)
3242 struct target_ops
*t
;
3244 /* If we're already connected to something that can get us OS
3245 related data, use it. Otherwise, try using the native
3247 if (current_target
.to_stratum
>= process_stratum
)
3248 t
= current_target
.beneath
;
3250 t
= find_default_run_target (NULL
);
3252 for (; t
!= NULL
; t
= t
->beneath
)
3254 if (t
->to_info_proc
!= NULL
)
3256 t
->to_info_proc (t
, args
, what
);
3259 fprintf_unfiltered (gdb_stdlog
,
3260 "target_info_proc (\"%s\", %d)\n", args
, what
);
3270 find_default_supports_disable_randomization (struct target_ops
*self
)
3272 struct target_ops
*t
;
3274 t
= find_default_run_target (NULL
);
3275 if (t
&& t
->to_supports_disable_randomization
)
3276 return (t
->to_supports_disable_randomization
) (t
);
3281 target_supports_disable_randomization (void)
3283 struct target_ops
*t
;
3285 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3286 if (t
->to_supports_disable_randomization
)
3287 return t
->to_supports_disable_randomization (t
);
3293 target_get_osdata (const char *type
)
3295 struct target_ops
*t
;
3297 /* If we're already connected to something that can get us OS
3298 related data, use it. Otherwise, try using the native
3300 if (current_target
.to_stratum
>= process_stratum
)
3301 t
= current_target
.beneath
;
3303 t
= find_default_run_target ("get OS data");
3308 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3311 /* Determine the current address space of thread PTID. */
3313 struct address_space
*
3314 target_thread_address_space (ptid_t ptid
)
3316 struct address_space
*aspace
;
3317 struct inferior
*inf
;
3318 struct target_ops
*t
;
3320 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3322 if (t
->to_thread_address_space
!= NULL
)
3324 aspace
= t
->to_thread_address_space (t
, ptid
);
3325 gdb_assert (aspace
);
3328 fprintf_unfiltered (gdb_stdlog
,
3329 "target_thread_address_space (%s) = %d\n",
3330 target_pid_to_str (ptid
),
3331 address_space_num (aspace
));
3336 /* Fall-back to the "main" address space of the inferior. */
3337 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3339 if (inf
== NULL
|| inf
->aspace
== NULL
)
3340 internal_error (__FILE__
, __LINE__
,
3341 _("Can't determine the current "
3342 "address space of thread %s\n"),
3343 target_pid_to_str (ptid
));
3349 /* Target file operations. */
3351 static struct target_ops
*
3352 default_fileio_target (void)
3354 /* If we're already connected to something that can perform
3355 file I/O, use it. Otherwise, try using the native target. */
3356 if (current_target
.to_stratum
>= process_stratum
)
3357 return current_target
.beneath
;
3359 return find_default_run_target ("file I/O");
3362 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3363 target file descriptor, or -1 if an error occurs (and set
3366 target_fileio_open (const char *filename
, int flags
, int mode
,
3369 struct target_ops
*t
;
3371 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3373 if (t
->to_fileio_open
!= NULL
)
3375 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3378 fprintf_unfiltered (gdb_stdlog
,
3379 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3380 filename
, flags
, mode
,
3381 fd
, fd
!= -1 ? 0 : *target_errno
);
3386 *target_errno
= FILEIO_ENOSYS
;
3390 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3391 Return the number of bytes written, or -1 if an error occurs
3392 (and set *TARGET_ERRNO). */
3394 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3395 ULONGEST offset
, int *target_errno
)
3397 struct target_ops
*t
;
3399 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3401 if (t
->to_fileio_pwrite
!= NULL
)
3403 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3407 fprintf_unfiltered (gdb_stdlog
,
3408 "target_fileio_pwrite (%d,...,%d,%s) "
3410 fd
, len
, pulongest (offset
),
3411 ret
, ret
!= -1 ? 0 : *target_errno
);
3416 *target_errno
= FILEIO_ENOSYS
;
3420 /* Read up to LEN bytes FD on the target into READ_BUF.
3421 Return the number of bytes read, or -1 if an error occurs
3422 (and set *TARGET_ERRNO). */
3424 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3425 ULONGEST offset
, int *target_errno
)
3427 struct target_ops
*t
;
3429 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3431 if (t
->to_fileio_pread
!= NULL
)
3433 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3437 fprintf_unfiltered (gdb_stdlog
,
3438 "target_fileio_pread (%d,...,%d,%s) "
3440 fd
, len
, pulongest (offset
),
3441 ret
, ret
!= -1 ? 0 : *target_errno
);
3446 *target_errno
= FILEIO_ENOSYS
;
3450 /* Close FD on the target. Return 0, or -1 if an error occurs
3451 (and set *TARGET_ERRNO). */
3453 target_fileio_close (int fd
, int *target_errno
)
3455 struct target_ops
*t
;
3457 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3459 if (t
->to_fileio_close
!= NULL
)
3461 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3464 fprintf_unfiltered (gdb_stdlog
,
3465 "target_fileio_close (%d) = %d (%d)\n",
3466 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3471 *target_errno
= FILEIO_ENOSYS
;
3475 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3476 occurs (and set *TARGET_ERRNO). */
3478 target_fileio_unlink (const char *filename
, int *target_errno
)
3480 struct target_ops
*t
;
3482 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3484 if (t
->to_fileio_unlink
!= NULL
)
3486 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3489 fprintf_unfiltered (gdb_stdlog
,
3490 "target_fileio_unlink (%s) = %d (%d)\n",
3491 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3496 *target_errno
= FILEIO_ENOSYS
;
3500 /* Read value of symbolic link FILENAME on the target. Return a
3501 null-terminated string allocated via xmalloc, or NULL if an error
3502 occurs (and set *TARGET_ERRNO). */
3504 target_fileio_readlink (const char *filename
, int *target_errno
)
3506 struct target_ops
*t
;
3508 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3510 if (t
->to_fileio_readlink
!= NULL
)
3512 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3515 fprintf_unfiltered (gdb_stdlog
,
3516 "target_fileio_readlink (%s) = %s (%d)\n",
3517 filename
, ret
? ret
: "(nil)",
3518 ret
? 0 : *target_errno
);
3523 *target_errno
= FILEIO_ENOSYS
;
3528 target_fileio_close_cleanup (void *opaque
)
3530 int fd
= *(int *) opaque
;
3533 target_fileio_close (fd
, &target_errno
);
3536 /* Read target file FILENAME. Store the result in *BUF_P and
3537 return the size of the transferred data. PADDING additional bytes are
3538 available in *BUF_P. This is a helper function for
3539 target_fileio_read_alloc; see the declaration of that function for more
3543 target_fileio_read_alloc_1 (const char *filename
,
3544 gdb_byte
**buf_p
, int padding
)
3546 struct cleanup
*close_cleanup
;
3547 size_t buf_alloc
, buf_pos
;
3553 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3557 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3559 /* Start by reading up to 4K at a time. The target will throttle
3560 this number down if necessary. */
3562 buf
= xmalloc (buf_alloc
);
3566 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3567 buf_alloc
- buf_pos
- padding
, buf_pos
,
3571 /* An error occurred. */
3572 do_cleanups (close_cleanup
);
3578 /* Read all there was. */
3579 do_cleanups (close_cleanup
);
3589 /* If the buffer is filling up, expand it. */
3590 if (buf_alloc
< buf_pos
* 2)
3593 buf
= xrealloc (buf
, buf_alloc
);
3600 /* Read target file FILENAME. Store the result in *BUF_P and return
3601 the size of the transferred data. See the declaration in "target.h"
3602 function for more information about the return value. */
3605 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3607 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3610 /* Read target file FILENAME. The result is NUL-terminated and
3611 returned as a string, allocated using xmalloc. If an error occurs
3612 or the transfer is unsupported, NULL is returned. Empty objects
3613 are returned as allocated but empty strings. A warning is issued
3614 if the result contains any embedded NUL bytes. */
3617 target_fileio_read_stralloc (const char *filename
)
3621 LONGEST i
, transferred
;
3623 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3624 bufstr
= (char *) buffer
;
3626 if (transferred
< 0)
3629 if (transferred
== 0)
3630 return xstrdup ("");
3632 bufstr
[transferred
] = 0;
3634 /* Check for embedded NUL bytes; but allow trailing NULs. */
3635 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3638 warning (_("target file %s "
3639 "contained unexpected null characters"),
3649 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3650 CORE_ADDR addr
, int len
)
3652 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3656 default_watchpoint_addr_within_range (struct target_ops
*target
,
3658 CORE_ADDR start
, int length
)
3660 return addr
>= start
&& addr
< start
+ length
;
3663 static struct gdbarch
*
3664 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3666 return target_gdbarch ();
3682 return_minus_one (void)
3694 * Find the next target down the stack from the specified target.
3698 find_target_beneath (struct target_ops
*t
)
3706 find_target_at (enum strata stratum
)
3708 struct target_ops
*t
;
3710 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3711 if (t
->to_stratum
== stratum
)
3718 /* The inferior process has died. Long live the inferior! */
3721 generic_mourn_inferior (void)
3725 ptid
= inferior_ptid
;
3726 inferior_ptid
= null_ptid
;
3728 /* Mark breakpoints uninserted in case something tries to delete a
3729 breakpoint while we delete the inferior's threads (which would
3730 fail, since the inferior is long gone). */
3731 mark_breakpoints_out ();
3733 if (!ptid_equal (ptid
, null_ptid
))
3735 int pid
= ptid_get_pid (ptid
);
3736 exit_inferior (pid
);
3739 /* Note this wipes step-resume breakpoints, so needs to be done
3740 after exit_inferior, which ends up referencing the step-resume
3741 breakpoints through clear_thread_inferior_resources. */
3742 breakpoint_init_inferior (inf_exited
);
3744 registers_changed ();
3746 reopen_exec_file ();
3747 reinit_frame_cache ();
3749 if (deprecated_detach_hook
)
3750 deprecated_detach_hook ();
3753 /* Convert a normal process ID to a string. Returns the string in a
3757 normal_pid_to_str (ptid_t ptid
)
3759 static char buf
[32];
3761 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3766 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3768 return normal_pid_to_str (ptid
);
3771 /* Error-catcher for target_find_memory_regions. */
3773 dummy_find_memory_regions (struct target_ops
*self
,
3774 find_memory_region_ftype ignore1
, void *ignore2
)
3776 error (_("Command not implemented for this target."));
3780 /* Error-catcher for target_make_corefile_notes. */
3782 dummy_make_corefile_notes (struct target_ops
*self
,
3783 bfd
*ignore1
, int *ignore2
)
3785 error (_("Command not implemented for this target."));
3789 /* Error-catcher for target_get_bookmark. */
3791 dummy_get_bookmark (struct target_ops
*self
, char *ignore1
, int ignore2
)
3797 /* Error-catcher for target_goto_bookmark. */
3799 dummy_goto_bookmark (struct target_ops
*self
, gdb_byte
*ignore
, int from_tty
)
3804 /* Set up the handful of non-empty slots needed by the dummy target
3808 init_dummy_target (void)
3810 dummy_target
.to_shortname
= "None";
3811 dummy_target
.to_longname
= "None";
3812 dummy_target
.to_doc
= "";
3813 dummy_target
.to_attach
= find_default_attach
;
3814 dummy_target
.to_detach
=
3815 (void (*)(struct target_ops
*, const char *, int))target_ignore
;
3816 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3817 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3818 dummy_target
.to_supports_disable_randomization
3819 = find_default_supports_disable_randomization
;
3820 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3821 dummy_target
.to_stratum
= dummy_stratum
;
3822 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3823 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3824 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3825 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3826 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3827 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3828 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3829 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3830 dummy_target
.to_has_execution
3831 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3832 dummy_target
.to_magic
= OPS_MAGIC
;
3834 install_dummy_methods (&dummy_target
);
3838 debug_to_open (char *args
, int from_tty
)
3840 debug_target
.to_open (args
, from_tty
);
3842 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3846 target_close (struct target_ops
*targ
)
3848 gdb_assert (!target_is_pushed (targ
));
3850 if (targ
->to_xclose
!= NULL
)
3851 targ
->to_xclose (targ
);
3852 else if (targ
->to_close
!= NULL
)
3853 targ
->to_close (targ
);
3856 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3860 target_attach (char *args
, int from_tty
)
3862 struct target_ops
*t
;
3864 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3866 if (t
->to_attach
!= NULL
)
3868 t
->to_attach (t
, args
, from_tty
);
3870 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3876 internal_error (__FILE__
, __LINE__
,
3877 _("could not find a target to attach"));
3881 target_thread_alive (ptid_t ptid
)
3883 struct target_ops
*t
;
3885 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3887 if (t
->to_thread_alive
!= NULL
)
3891 retval
= t
->to_thread_alive (t
, ptid
);
3893 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3894 ptid_get_pid (ptid
), retval
);
3904 target_find_new_threads (void)
3906 struct target_ops
*t
;
3908 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3910 if (t
->to_find_new_threads
!= NULL
)
3912 t
->to_find_new_threads (t
);
3914 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3922 target_stop (ptid_t ptid
)
3926 warning (_("May not interrupt or stop the target, ignoring attempt"));
3930 (*current_target
.to_stop
) (¤t_target
, ptid
);
3934 debug_to_post_attach (struct target_ops
*self
, int pid
)
3936 debug_target
.to_post_attach (&debug_target
, pid
);
3938 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3941 /* Concatenate ELEM to LIST, a comma separate list, and return the
3942 result. The LIST incoming argument is released. */
3945 str_comma_list_concat_elem (char *list
, const char *elem
)
3948 return xstrdup (elem
);
3950 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
           int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3972 target_options_to_string (int target_options
)
3976 #define DO_TARG_OPTION(OPT) \
3977 ret = do_option (&target_options, ret, OPT, #OPT)
3979 DO_TARG_OPTION (TARGET_WNOHANG
);
3981 if (target_options
!= 0)
3982 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3990 debug_print_register (const char * func
,
3991 struct regcache
*regcache
, int regno
)
3993 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3995 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3996 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3997 && gdbarch_register_name (gdbarch
, regno
) != NULL
3998 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3999 fprintf_unfiltered (gdb_stdlog
, "(%s)",
4000 gdbarch_register_name (gdbarch
, regno
));
4002 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
4003 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
4005 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4006 int i
, size
= register_size (gdbarch
, regno
);
4007 gdb_byte buf
[MAX_REGISTER_SIZE
];
4009 regcache_raw_collect (regcache
, regno
, buf
);
4010 fprintf_unfiltered (gdb_stdlog
, " = ");
4011 for (i
= 0; i
< size
; i
++)
4013 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
4015 if (size
<= sizeof (LONGEST
))
4017 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
4019 fprintf_unfiltered (gdb_stdlog
, " %s %s",
4020 core_addr_to_string_nz (val
), plongest (val
));
4023 fprintf_unfiltered (gdb_stdlog
, "\n");
4027 target_fetch_registers (struct regcache
*regcache
, int regno
)
4029 struct target_ops
*t
;
4031 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4033 if (t
->to_fetch_registers
!= NULL
)
4035 t
->to_fetch_registers (t
, regcache
, regno
);
4037 debug_print_register ("target_fetch_registers", regcache
, regno
);
4044 target_store_registers (struct regcache
*regcache
, int regno
)
4046 struct target_ops
*t
;
4048 if (!may_write_registers
)
4049 error (_("Writing to registers is not allowed (regno %d)"), regno
);
4051 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
4054 debug_print_register ("target_store_registers", regcache
, regno
);
4059 target_core_of_thread (ptid_t ptid
)
4061 struct target_ops
*t
;
4063 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4065 if (t
->to_core_of_thread
!= NULL
)
4067 int retval
= t
->to_core_of_thread (t
, ptid
);
4070 fprintf_unfiltered (gdb_stdlog
,
4071 "target_core_of_thread (%d) = %d\n",
4072 ptid_get_pid (ptid
), retval
);
4081 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
4083 struct target_ops
*t
;
4085 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4087 if (t
->to_verify_memory
!= NULL
)
4089 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
4092 fprintf_unfiltered (gdb_stdlog
,
4093 "target_verify_memory (%s, %s) = %d\n",
4094 paddress (target_gdbarch (), memaddr
),
4104 /* The documentation for this function is in its prototype declaration in
4108 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4110 struct target_ops
*t
;
4112 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4113 if (t
->to_insert_mask_watchpoint
!= NULL
)
4117 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
4120 fprintf_unfiltered (gdb_stdlog
, "\
4121 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4122 core_addr_to_string (addr
),
4123 core_addr_to_string (mask
), rw
, ret
);
4131 /* The documentation for this function is in its prototype declaration in
4135 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4137 struct target_ops
*t
;
4139 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4140 if (t
->to_remove_mask_watchpoint
!= NULL
)
4144 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4147 fprintf_unfiltered (gdb_stdlog
, "\
4148 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4149 core_addr_to_string (addr
),
4150 core_addr_to_string (mask
), rw
, ret
);
4158 /* The documentation for this function is in its prototype declaration
4162 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4164 struct target_ops
*t
;
4166 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4167 if (t
->to_masked_watch_num_registers
!= NULL
)
4168 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4173 /* The documentation for this function is in its prototype declaration
4177 target_ranged_break_num_registers (void)
4179 struct target_ops
*t
;
4181 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4182 if (t
->to_ranged_break_num_registers
!= NULL
)
4183 return t
->to_ranged_break_num_registers (t
);
4190 struct btrace_target_info
*
4191 target_enable_btrace (ptid_t ptid
)
4193 struct target_ops
*t
;
4195 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4196 if (t
->to_enable_btrace
!= NULL
)
4197 return t
->to_enable_btrace (ptid
);
4206 target_disable_btrace (struct btrace_target_info
*btinfo
)
4208 struct target_ops
*t
;
4210 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4211 if (t
->to_disable_btrace
!= NULL
)
4213 t
->to_disable_btrace (btinfo
);
4223 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4225 struct target_ops
*t
;
4227 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4228 if (t
->to_teardown_btrace
!= NULL
)
4230 t
->to_teardown_btrace (btinfo
);
4240 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4241 struct btrace_target_info
*btinfo
,
4242 enum btrace_read_type type
)
4244 struct target_ops
*t
;
4246 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4247 if (t
->to_read_btrace
!= NULL
)
4248 return t
->to_read_btrace (btrace
, btinfo
, type
);
4251 return BTRACE_ERR_NOT_SUPPORTED
;
4257 target_stop_recording (void)
4259 struct target_ops
*t
;
4261 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4262 if (t
->to_stop_recording
!= NULL
)
4264 t
->to_stop_recording ();
4268 /* This is optional. */
4274 target_info_record (void)
4276 struct target_ops
*t
;
4278 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4279 if (t
->to_info_record
!= NULL
)
4281 t
->to_info_record ();
4291 target_save_record (const char *filename
)
4293 struct target_ops
*t
;
4295 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4296 if (t
->to_save_record
!= NULL
)
4298 t
->to_save_record (filename
);
4308 target_supports_delete_record (void)
4310 struct target_ops
*t
;
4312 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4313 if (t
->to_delete_record
!= NULL
)
4322 target_delete_record (void)
4324 struct target_ops
*t
;
4326 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4327 if (t
->to_delete_record
!= NULL
)
4329 t
->to_delete_record ();
4339 target_record_is_replaying (void)
4341 struct target_ops
*t
;
4343 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4344 if (t
->to_record_is_replaying
!= NULL
)
4345 return t
->to_record_is_replaying ();
4353 target_goto_record_begin (void)
4355 struct target_ops
*t
;
4357 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4358 if (t
->to_goto_record_begin
!= NULL
)
4360 t
->to_goto_record_begin ();
4370 target_goto_record_end (void)
4372 struct target_ops
*t
;
4374 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4375 if (t
->to_goto_record_end
!= NULL
)
4377 t
->to_goto_record_end ();
4387 target_goto_record (ULONGEST insn
)
4389 struct target_ops
*t
;
4391 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4392 if (t
->to_goto_record
!= NULL
)
4394 t
->to_goto_record (insn
);
4404 target_insn_history (int size
, int flags
)
4406 struct target_ops
*t
;
4408 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4409 if (t
->to_insn_history
!= NULL
)
4411 t
->to_insn_history (size
, flags
);
4421 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4423 struct target_ops
*t
;
4425 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4426 if (t
->to_insn_history_from
!= NULL
)
4428 t
->to_insn_history_from (from
, size
, flags
);
4438 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4440 struct target_ops
*t
;
4442 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4443 if (t
->to_insn_history_range
!= NULL
)
4445 t
->to_insn_history_range (begin
, end
, flags
);
4455 target_call_history (int size
, int flags
)
4457 struct target_ops
*t
;
4459 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4460 if (t
->to_call_history
!= NULL
)
4462 t
->to_call_history (size
, flags
);
4472 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4474 struct target_ops
*t
;
4476 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4477 if (t
->to_call_history_from
!= NULL
)
4479 t
->to_call_history_from (begin
, size
, flags
);
4489 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4491 struct target_ops
*t
;
4493 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4494 if (t
->to_call_history_range
!= NULL
)
4496 t
->to_call_history_range (begin
, end
, flags
);
4504 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4506 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4508 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4513 const struct frame_unwind
*
4514 target_get_unwinder (void)
4516 struct target_ops
*t
;
4518 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4519 if (t
->to_get_unwinder
!= NULL
)
4520 return t
->to_get_unwinder
;
4527 const struct frame_unwind
*
4528 target_get_tailcall_unwinder (void)
4530 struct target_ops
*t
;
4532 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4533 if (t
->to_get_tailcall_unwinder
!= NULL
)
4534 return t
->to_get_tailcall_unwinder
;
4542 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4543 struct gdbarch
*gdbarch
)
4545 for (; ops
!= NULL
; ops
= ops
->beneath
)
4546 if (ops
->to_decr_pc_after_break
!= NULL
)
4547 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4549 return gdbarch_decr_pc_after_break (gdbarch
);
4555 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4557 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4561 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4562 int write
, struct mem_attrib
*attrib
,
4563 struct target_ops
*target
)
4567 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4570 fprintf_unfiltered (gdb_stdlog
,
4571 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4572 paddress (target_gdbarch (), memaddr
), len
,
4573 write
? "write" : "read", retval
);
4579 fputs_unfiltered (", bytes =", gdb_stdlog
);
4580 for (i
= 0; i
< retval
; i
++)
4582 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4584 if (targetdebug
< 2 && i
> 0)
4586 fprintf_unfiltered (gdb_stdlog
, " ...");
4589 fprintf_unfiltered (gdb_stdlog
, "\n");
4592 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4596 fputc_unfiltered ('\n', gdb_stdlog
);
4602 debug_to_files_info (struct target_ops
*target
)
4604 debug_target
.to_files_info (target
);
4606 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4610 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4611 struct bp_target_info
*bp_tgt
)
4615 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4617 fprintf_unfiltered (gdb_stdlog
,
4618 "target_insert_breakpoint (%s, xxx) = %ld\n",
4619 core_addr_to_string (bp_tgt
->placed_address
),
4620 (unsigned long) retval
);
4625 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4626 struct bp_target_info
*bp_tgt
)
4630 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4632 fprintf_unfiltered (gdb_stdlog
,
4633 "target_remove_breakpoint (%s, xxx) = %ld\n",
4634 core_addr_to_string (bp_tgt
->placed_address
),
4635 (unsigned long) retval
);
4640 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4641 int type
, int cnt
, int from_tty
)
4645 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4646 type
, cnt
, from_tty
);
4648 fprintf_unfiltered (gdb_stdlog
,
4649 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4650 (unsigned long) type
,
4651 (unsigned long) cnt
,
4652 (unsigned long) from_tty
,
4653 (unsigned long) retval
);
4658 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4659 CORE_ADDR addr
, int len
)
4663 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4666 fprintf_unfiltered (gdb_stdlog
,
4667 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4668 core_addr_to_string (addr
), (unsigned long) len
,
4669 core_addr_to_string (retval
));
4674 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4675 CORE_ADDR addr
, int len
, int rw
,
4676 struct expression
*cond
)
4680 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4684 fprintf_unfiltered (gdb_stdlog
,
4685 "target_can_accel_watchpoint_condition "
4686 "(%s, %d, %d, %s) = %ld\n",
4687 core_addr_to_string (addr
), len
, rw
,
4688 host_address_to_string (cond
), (unsigned long) retval
);
4693 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4697 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4699 fprintf_unfiltered (gdb_stdlog
,
4700 "target_stopped_by_watchpoint () = %ld\n",
4701 (unsigned long) retval
);
4706 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4710 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4712 fprintf_unfiltered (gdb_stdlog
,
4713 "target_stopped_data_address ([%s]) = %ld\n",
4714 core_addr_to_string (*addr
),
4715 (unsigned long)retval
);
4720 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4722 CORE_ADDR start
, int length
)
4726 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4729 fprintf_filtered (gdb_stdlog
,
4730 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4731 core_addr_to_string (addr
), core_addr_to_string (start
),
4737 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4738 struct gdbarch
*gdbarch
,
4739 struct bp_target_info
*bp_tgt
)
4743 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4746 fprintf_unfiltered (gdb_stdlog
,
4747 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4748 core_addr_to_string (bp_tgt
->placed_address
),
4749 (unsigned long) retval
);
4754 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4755 struct gdbarch
*gdbarch
,
4756 struct bp_target_info
*bp_tgt
)
4760 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4763 fprintf_unfiltered (gdb_stdlog
,
4764 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4765 core_addr_to_string (bp_tgt
->placed_address
),
4766 (unsigned long) retval
);
4771 debug_to_insert_watchpoint (struct target_ops
*self
,
4772 CORE_ADDR addr
, int len
, int type
,
4773 struct expression
*cond
)
4777 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4778 addr
, len
, type
, cond
);
4780 fprintf_unfiltered (gdb_stdlog
,
4781 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4782 core_addr_to_string (addr
), len
, type
,
4783 host_address_to_string (cond
), (unsigned long) retval
);
4788 debug_to_remove_watchpoint (struct target_ops
*self
,
4789 CORE_ADDR addr
, int len
, int type
,
4790 struct expression
*cond
)
4794 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4795 addr
, len
, type
, cond
);
4797 fprintf_unfiltered (gdb_stdlog
,
4798 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4799 core_addr_to_string (addr
), len
, type
,
4800 host_address_to_string (cond
), (unsigned long) retval
);
4805 debug_to_terminal_init (struct target_ops
*self
)
4807 debug_target
.to_terminal_init (&debug_target
);
4809 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4813 debug_to_terminal_inferior (struct target_ops
*self
)
4815 debug_target
.to_terminal_inferior (&debug_target
);
4817 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4821 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4823 debug_target
.to_terminal_ours_for_output (&debug_target
);
4825 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4829 debug_to_terminal_ours (struct target_ops
*self
)
4831 debug_target
.to_terminal_ours (&debug_target
);
4833 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4837 debug_to_terminal_save_ours (struct target_ops
*self
)
4839 debug_target
.to_terminal_save_ours (&debug_target
);
4841 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4845 debug_to_terminal_info (struct target_ops
*self
,
4846 const char *arg
, int from_tty
)
4848 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4850 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4855 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4857 debug_target
.to_load (&debug_target
, args
, from_tty
);
4859 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4863 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4865 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4867 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4868 ptid_get_pid (ptid
));
4872 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4876 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4878 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4885 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4889 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4891 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4898 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4902 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4904 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4911 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4915 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4917 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4924 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4928 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4930 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4937 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4941 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4943 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4950 debug_to_has_exited (struct target_ops
*self
,
4951 int pid
, int wait_status
, int *exit_status
)
4955 has_exited
= debug_target
.to_has_exited (&debug_target
,
4956 pid
, wait_status
, exit_status
);
4958 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4959 pid
, wait_status
, *exit_status
, has_exited
);
4965 debug_to_can_run (struct target_ops
*self
)
4969 retval
= debug_target
.to_can_run (&debug_target
);
4971 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4976 static struct gdbarch
*
4977 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4979 struct gdbarch
*retval
;
4981 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4983 fprintf_unfiltered (gdb_stdlog
,
4984 "target_thread_architecture (%s) = %s [%s]\n",
4985 target_pid_to_str (ptid
),
4986 host_address_to_string (retval
),
4987 gdbarch_bfd_arch_info (retval
)->printable_name
);
4992 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4994 debug_target
.to_stop (&debug_target
, ptid
);
4996 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4997 target_pid_to_str (ptid
));
5001 debug_to_rcmd (struct target_ops
*self
, char *command
,
5002 struct ui_file
*outbuf
)
5004 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
5005 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
5009 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
5013 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
5015 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
5022 setup_target_debug (void)
5024 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
5026 current_target
.to_open
= debug_to_open
;
5027 current_target
.to_post_attach
= debug_to_post_attach
;
5028 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
5029 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
5030 current_target
.to_files_info
= debug_to_files_info
;
5031 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
5032 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
5033 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
5034 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
5035 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
5036 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
5037 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
5038 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
5039 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
5040 current_target
.to_watchpoint_addr_within_range
5041 = debug_to_watchpoint_addr_within_range
;
5042 current_target
.to_region_ok_for_hw_watchpoint
5043 = debug_to_region_ok_for_hw_watchpoint
;
5044 current_target
.to_can_accel_watchpoint_condition
5045 = debug_to_can_accel_watchpoint_condition
;
5046 current_target
.to_terminal_init
= debug_to_terminal_init
;
5047 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
5048 current_target
.to_terminal_ours_for_output
5049 = debug_to_terminal_ours_for_output
;
5050 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
5051 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
5052 current_target
.to_terminal_info
= debug_to_terminal_info
;
5053 current_target
.to_load
= debug_to_load
;
5054 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
5055 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
5056 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
5057 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
5058 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
5059 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
5060 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
5061 current_target
.to_has_exited
= debug_to_has_exited
;
5062 current_target
.to_can_run
= debug_to_can_run
;
5063 current_target
.to_stop
= debug_to_stop
;
5064 current_target
.to_rcmd
= debug_to_rcmd
;
5065 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
5066 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
5070 static char targ_desc
[] =
5071 "Names of targets and files being debugged.\nShows the entire \
5072 stack of targets currently in use (including the exec-file,\n\
5073 core-file, and process, if any), as well as the symbol file name.";
5076 do_monitor_command (char *cmd
,
5079 if ((current_target
.to_rcmd
5080 == (void (*) (struct target_ops
*, char *, struct ui_file
*)) tcomplain
)
5081 || (current_target
.to_rcmd
== debug_to_rcmd
5082 && (debug_target
.to_rcmd
5083 == (void (*) (struct target_ops
*,
5084 char *, struct ui_file
*)) tcomplain
)))
5085 error (_("\"monitor\" command not supported by this target."));
5086 target_rcmd (cmd
, gdb_stdtarg
);
5089 /* Print the name of each layers of our target stack. */
5092 maintenance_print_target_stack (char *cmd
, int from_tty
)
5094 struct target_ops
*t
;
5096 printf_filtered (_("The current target stack is:\n"));
5098 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
5100 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
5104 /* Controls if async mode is permitted. */
5105 int target_async_permitted
= 0;
5107 /* The set command writes to this variable. If the inferior is
5108 executing, target_async_permitted is *not* updated. */
5109 static int target_async_permitted_1
= 0;
5112 set_target_async_command (char *args
, int from_tty
,
5113 struct cmd_list_element
*c
)
5115 if (have_live_inferiors ())
5117 target_async_permitted_1
= target_async_permitted
;
5118 error (_("Cannot change this setting while the inferior is running."));
5121 target_async_permitted
= target_async_permitted_1
;
/* "show target-async" callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c,
                           const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in "
                      "asynchronous mode is %s.\n"), value);
}
5134 /* Temporary copies of permission settings. */
5136 static int may_write_registers_1
= 1;
5137 static int may_write_memory_1
= 1;
5138 static int may_insert_breakpoints_1
= 1;
5139 static int may_insert_tracepoints_1
= 1;
5140 static int may_insert_fast_tracepoints_1
= 1;
5141 static int may_stop_1
= 1;
5143 /* Make the user-set values match the real values again. */
5146 update_target_permissions (void)
5148 may_write_registers_1
= may_write_registers
;
5149 may_write_memory_1
= may_write_memory
;
5150 may_insert_breakpoints_1
= may_insert_breakpoints
;
5151 may_insert_tracepoints_1
= may_insert_tracepoints
;
5152 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
5153 may_stop_1
= may_stop
;
5156 /* The one function handles (most of) the permission flags in the same
5160 set_target_permissions (char *args
, int from_tty
,
5161 struct cmd_list_element
*c
)
5163 if (target_has_execution
)
5165 update_target_permissions ();
5166 error (_("Cannot change this setting while the inferior is running."));
5169 /* Make the real values match the user-changed values. */
5170 may_write_registers
= may_write_registers_1
;
5171 may_insert_breakpoints
= may_insert_breakpoints_1
;
5172 may_insert_tracepoints
= may_insert_tracepoints_1
;
5173 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
5174 may_stop
= may_stop_1
;
5175 update_observer_mode ();
5178 /* Set memory write permission independently of observer mode. */
5181 set_write_memory_permission (char *args
, int from_tty
,
5182 struct cmd_list_element
*c
)
5184 /* Make the real values match the user-changed values. */
5185 may_write_memory
= may_write_memory_1
;
5186 update_observer_mode ();
5191 initialize_targets (void)
5193 init_dummy_target ();
5194 push_target (&dummy_target
);
5196 add_info ("target", target_info
, targ_desc
);
5197 add_info ("files", target_info
, targ_desc
);
5199 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5200 Set target debugging."), _("\
5201 Show target debugging."), _("\
5202 When non-zero, target debugging is enabled. Higher numbers are more\n\
5203 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5207 &setdebuglist
, &showdebuglist
);
5209 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5210 &trust_readonly
, _("\
5211 Set mode for reading from readonly sections."), _("\
5212 Show mode for reading from readonly sections."), _("\
5213 When this mode is on, memory reads from readonly sections (such as .text)\n\
5214 will be read from the object file instead of from the target. This will\n\
5215 result in significant performance improvement for remote targets."),
5217 show_trust_readonly
,
5218 &setlist
, &showlist
);
5220 add_com ("monitor", class_obscure
, do_monitor_command
,
5221 _("Send a command to the remote monitor (remote targets only)."));
5223 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5224 _("Print the name of each layer of the internal target stack."),
5225 &maintenanceprintlist
);
5227 add_setshow_boolean_cmd ("target-async", no_class
,
5228 &target_async_permitted_1
, _("\
5229 Set whether gdb controls the inferior in asynchronous mode."), _("\
5230 Show whether gdb controls the inferior in asynchronous mode."), _("\
5231 Tells gdb whether to control the inferior in asynchronous mode."),
5232 set_target_async_command
,
5233 show_target_async_command
,
5237 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5238 &may_write_registers_1
, _("\
5239 Set permission to write into registers."), _("\
5240 Show permission to write into registers."), _("\
5241 When this permission is on, GDB may write into the target's registers.\n\
5242 Otherwise, any sort of write attempt will result in an error."),
5243 set_target_permissions
, NULL
,
5244 &setlist
, &showlist
);
5246 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5247 &may_write_memory_1
, _("\
5248 Set permission to write into target memory."), _("\
5249 Show permission to write into target memory."), _("\
5250 When this permission is on, GDB may write into the target's memory.\n\
5251 Otherwise, any sort of write attempt will result in an error."),
5252 set_write_memory_permission
, NULL
,
5253 &setlist
, &showlist
);
5255 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5256 &may_insert_breakpoints_1
, _("\
5257 Set permission to insert breakpoints in the target."), _("\
5258 Show permission to insert breakpoints in the target."), _("\
5259 When this permission is on, GDB may insert breakpoints in the program.\n\
5260 Otherwise, any sort of insertion attempt will result in an error."),
5261 set_target_permissions
, NULL
,
5262 &setlist
, &showlist
);
5264 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5265 &may_insert_tracepoints_1
, _("\
5266 Set permission to insert tracepoints in the target."), _("\
5267 Show permission to insert tracepoints in the target."), _("\
5268 When this permission is on, GDB may insert tracepoints in the program.\n\
5269 Otherwise, any sort of insertion attempt will result in an error."),
5270 set_target_permissions
, NULL
,
5271 &setlist
, &showlist
);
5273 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5274 &may_insert_fast_tracepoints_1
, _("\
5275 Set permission to insert fast tracepoints in the target."), _("\
5276 Show permission to insert fast tracepoints in the target."), _("\
5277 When this permission is on, GDB may insert fast tracepoints.\n\
5278 Otherwise, any sort of insertion attempt will result in an error."),
5279 set_target_permissions
, NULL
,
5280 &setlist
, &showlist
);
5282 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5284 Set permission to interrupt or signal the target."), _("\
5285 Show permission to interrupt or signal the target."), _("\
5286 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5287 Otherwise, any attempt to interrupt or stop will be ignored."),
5288 set_target_permissions
, NULL
,
5289 &setlist
, &showlist
);