1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static void tcomplain (void) ATTRIBUTE_NORETURN
;
62 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
64 static int return_zero (void);
66 static int return_one (void);
68 static int return_minus_one (void);
70 static void *return_null (void);
72 void target_ignore (void);
74 static void target_command (char *, int);
76 static struct target_ops
*find_default_run_target (char *);
78 static target_xfer_partial_ftype default_xfer_partial
;
80 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
83 static int find_default_can_async_p (struct target_ops
*ignore
);
85 static int find_default_is_async_p (struct target_ops
*ignore
);
87 #include "target-delegates.c"
89 static void init_dummy_target (void);
91 static struct target_ops debug_target
;
93 static void debug_to_open (char *, int);
95 static void debug_to_prepare_to_store (struct target_ops
*self
,
98 static void debug_to_files_info (struct target_ops
*);
100 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
101 struct bp_target_info
*);
103 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
104 struct bp_target_info
*);
106 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
109 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
111 struct bp_target_info
*);
113 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
115 struct bp_target_info
*);
117 static int debug_to_insert_watchpoint (struct target_ops
*self
,
119 struct expression
*);
121 static int debug_to_remove_watchpoint (struct target_ops
*self
,
123 struct expression
*);
125 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
127 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
128 CORE_ADDR
, CORE_ADDR
, int);
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
135 struct expression
*);
137 static void debug_to_terminal_init (struct target_ops
*self
);
139 static void debug_to_terminal_inferior (struct target_ops
*self
);
141 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
143 static void debug_to_terminal_save_ours (struct target_ops
*self
);
145 static void debug_to_terminal_ours (struct target_ops
*self
);
147 static void debug_to_load (struct target_ops
*self
, char *, int);
149 static int debug_to_can_run (struct target_ops
*self
);
151 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
156 struct target_ops
**target_structs
;
157 unsigned target_struct_size
;
158 unsigned target_struct_allocsize
;
159 #define DEFAULT_ALLOCSIZE 10
161 /* The initial current target, so that there is always a semi-valid
164 static struct target_ops dummy_target
;
166 /* Top of target stack. */
168 static struct target_ops
*target_stack
;
170 /* The target structure we are currently using to talk to a process
171 or file or whatever "inferior" we have. */
173 struct target_ops current_target
;
175 /* Command list for target. */
177 static struct cmd_list_element
*targetlist
= NULL
;
179 /* Nonzero if we should trust readonly sections from the
180 executable when reading memory. */
182 static int trust_readonly
= 0;
184 /* Nonzero if we should show true memory content including
185 memory breakpoint inserted by gdb. */
187 static int show_memory_breakpoints
= 0;
189 /* These globals control whether GDB attempts to perform these
190 operations; they are useful for targets that need to prevent
191 inadvertant disruption, such as in non-stop mode. */
193 int may_write_registers
= 1;
195 int may_write_memory
= 1;
197 int may_insert_breakpoints
= 1;
199 int may_insert_tracepoints
= 1;
201 int may_insert_fast_tracepoints
= 1;
205 /* Non-zero if we want to see trace of target level stuff. */
207 static unsigned int targetdebug
= 0;
/* "show debug target" callback: print the current value of the
   "debug target" setting (VALUE is its string form).  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
215 static void setup_target_debug (void);
217 /* The user just typed 'target' without the name of a target. */
220 target_command (char *arg
, int from_tty
)
222 fputs_filtered ("Argument required (target name). Try `help target'\n",
226 /* Default target_has_* methods for process_stratum targets. */
229 default_child_has_all_memory (struct target_ops
*ops
)
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid
, null_ptid
))
239 default_child_has_memory (struct target_ops
*ops
)
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid
, null_ptid
))
249 default_child_has_stack (struct target_ops
*ops
)
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid
, null_ptid
))
259 default_child_has_registers (struct target_ops
*ops
)
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid
, null_ptid
))
269 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
271 /* If there's no thread selected, then we can't make it run through
273 if (ptid_equal (the_ptid
, null_ptid
))
281 target_has_all_memory_1 (void)
283 struct target_ops
*t
;
285 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
286 if (t
->to_has_all_memory (t
))
293 target_has_memory_1 (void)
295 struct target_ops
*t
;
297 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
298 if (t
->to_has_memory (t
))
305 target_has_stack_1 (void)
307 struct target_ops
*t
;
309 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
310 if (t
->to_has_stack (t
))
317 target_has_registers_1 (void)
319 struct target_ops
*t
;
321 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
322 if (t
->to_has_registers (t
))
329 target_has_execution_1 (ptid_t the_ptid
)
331 struct target_ops
*t
;
333 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
334 if (t
->to_has_execution (t
, the_ptid
))
341 target_has_execution_current (void)
343 return target_has_execution_1 (inferior_ptid
);
346 /* Complete initialization of T. This ensures that various fields in
347 T are set, if needed by the target implementation. */
350 complete_target_initialization (struct target_ops
*t
)
352 /* Provide default values for all "must have" methods. */
353 if (t
->to_xfer_partial
== NULL
)
354 t
->to_xfer_partial
= default_xfer_partial
;
356 if (t
->to_has_all_memory
== NULL
)
357 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
359 if (t
->to_has_memory
== NULL
)
360 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
362 if (t
->to_has_stack
== NULL
)
363 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
365 if (t
->to_has_registers
== NULL
)
366 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
368 if (t
->to_has_execution
== NULL
)
369 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
371 install_delegators (t
);
374 /* Add possible target architecture T to the list and add a new
375 command 'target T->to_shortname'. Set COMPLETER as the command's
376 completer if not NULL. */
379 add_target_with_completer (struct target_ops
*t
,
380 completer_ftype
*completer
)
382 struct cmd_list_element
*c
;
384 complete_target_initialization (t
);
388 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
389 target_structs
= (struct target_ops
**) xmalloc
390 (target_struct_allocsize
* sizeof (*target_structs
));
392 if (target_struct_size
>= target_struct_allocsize
)
394 target_struct_allocsize
*= 2;
395 target_structs
= (struct target_ops
**)
396 xrealloc ((char *) target_structs
,
397 target_struct_allocsize
* sizeof (*target_structs
));
399 target_structs
[target_struct_size
++] = t
;
401 if (targetlist
== NULL
)
402 add_prefix_cmd ("target", class_run
, target_command
, _("\
403 Connect to a target machine or process.\n\
404 The first argument is the type or protocol of the target machine.\n\
405 Remaining arguments are interpreted by the target protocol. For more\n\
406 information on the arguments for a particular protocol, type\n\
407 `help target ' followed by the protocol name."),
408 &targetlist
, "target ", 0, &cmdlist
);
409 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
411 if (completer
!= NULL
)
412 set_cmd_completer (c
, completer
);
415 /* Add a possible target architecture to the list. */
418 add_target (struct target_ops
*t
)
420 add_target_with_completer (t
, NULL
);
426 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
428 struct cmd_list_element
*c
;
431 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
433 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
434 alt
= xstrprintf ("target %s", t
->to_shortname
);
435 deprecate_cmd (c
, alt
);
448 struct target_ops
*t
;
450 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
451 if (t
->to_kill
!= NULL
)
454 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
464 target_load (char *arg
, int from_tty
)
466 target_dcache_invalidate ();
467 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
471 target_create_inferior (char *exec_file
, char *args
,
472 char **env
, int from_tty
)
474 struct target_ops
*t
;
476 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
478 if (t
->to_create_inferior
!= NULL
)
480 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
482 fprintf_unfiltered (gdb_stdlog
,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file
, args
, from_tty
);
489 internal_error (__FILE__
, __LINE__
,
490 _("could not find a target to create inferior"));
494 target_terminal_inferior (void)
496 /* A background resume (``run&'') should leave GDB in control of the
497 terminal. Use target_can_async_p, not target_is_async_p, since at
498 this point the target is not async yet. However, if sync_execution
499 is not set, we know it will become async prior to resume. */
500 if (target_can_async_p () && !sync_execution
)
503 /* If GDB is resuming the inferior in the foreground, install
504 inferior's terminal modes. */
505 (*current_target
.to_terminal_inferior
) (¤t_target
);
509 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
510 struct target_ops
*t
)
512 errno
= EIO
; /* Can't read/write this location. */
513 return 0; /* No bytes handled. */
519 error (_("You can't do that when your target is `%s'"),
520 current_target
.to_shortname
);
526 error (_("You can't do that without a process to debug."));
/* Default to_terminal_info method: there is no saved terminal state to
   report, so just say so.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
535 /* A default implementation for the to_get_ada_task_ptid target method.
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
542 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
544 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops
*self
)
550 if (!target_can_execute_reverse
)
552 else if (!target_can_async_p ())
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
567    Consequently, new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
572 update_current_target (void)
574 struct target_ops
*t
;
576 /* First, reset current's contents. */
577 memset (¤t_target
, 0, sizeof (current_target
));
579 /* Install the delegators. */
580 install_delegators (¤t_target
);
582 #define INHERIT(FIELD, TARGET) \
583 if (!current_target.FIELD) \
584 current_target.FIELD = (TARGET)->FIELD
586 for (t
= target_stack
; t
; t
= t
->beneath
)
588 INHERIT (to_shortname
, t
);
589 INHERIT (to_longname
, t
);
591 /* Do not inherit to_open. */
592 /* Do not inherit to_close. */
593 /* Do not inherit to_attach. */
594 /* Do not inherit to_post_attach. */
595 INHERIT (to_attach_no_wait
, t
);
596 /* Do not inherit to_detach. */
597 /* Do not inherit to_disconnect. */
598 /* Do not inherit to_resume. */
599 /* Do not inherit to_wait. */
600 /* Do not inherit to_fetch_registers. */
601 /* Do not inherit to_store_registers. */
602 /* Do not inherit to_prepare_to_store. */
603 INHERIT (deprecated_xfer_memory
, t
);
604 /* Do not inherit to_files_info. */
605 /* Do not inherit to_insert_breakpoint. */
606 /* Do not inherit to_remove_breakpoint. */
607 /* Do not inherit to_can_use_hw_breakpoint. */
608 INHERIT (to_insert_hw_breakpoint
, t
);
609 INHERIT (to_remove_hw_breakpoint
, t
);
610 /* Do not inherit to_ranged_break_num_registers. */
611 INHERIT (to_insert_watchpoint
, t
);
612 INHERIT (to_remove_watchpoint
, t
);
613 /* Do not inherit to_insert_mask_watchpoint. */
614 /* Do not inherit to_remove_mask_watchpoint. */
615 /* Do not inherit to_stopped_data_address. */
616 INHERIT (to_have_steppable_watchpoint
, t
);
617 INHERIT (to_have_continuable_watchpoint
, t
);
618 /* Do not inherit to_stopped_by_watchpoint. */
619 INHERIT (to_watchpoint_addr_within_range
, t
);
620 INHERIT (to_region_ok_for_hw_watchpoint
, t
);
621 INHERIT (to_can_accel_watchpoint_condition
, t
);
622 /* Do not inherit to_masked_watch_num_registers. */
623 INHERIT (to_terminal_init
, t
);
624 INHERIT (to_terminal_inferior
, t
);
625 INHERIT (to_terminal_ours_for_output
, t
);
626 INHERIT (to_terminal_ours
, t
);
627 INHERIT (to_terminal_save_ours
, t
);
628 INHERIT (to_terminal_info
, t
);
629 /* Do not inherit to_kill. */
630 INHERIT (to_load
, t
);
631 /* Do no inherit to_create_inferior. */
632 INHERIT (to_post_startup_inferior
, t
);
633 INHERIT (to_insert_fork_catchpoint
, t
);
634 INHERIT (to_remove_fork_catchpoint
, t
);
635 INHERIT (to_insert_vfork_catchpoint
, t
);
636 INHERIT (to_remove_vfork_catchpoint
, t
);
637 /* Do not inherit to_follow_fork. */
638 INHERIT (to_insert_exec_catchpoint
, t
);
639 INHERIT (to_remove_exec_catchpoint
, t
);
640 INHERIT (to_set_syscall_catchpoint
, t
);
641 INHERIT (to_has_exited
, t
);
642 /* Do not inherit to_mourn_inferior. */
643 INHERIT (to_can_run
, t
);
644 /* Do not inherit to_pass_signals. */
645 /* Do not inherit to_program_signals. */
646 /* Do not inherit to_thread_alive. */
647 /* Do not inherit to_find_new_threads. */
648 /* Do not inherit to_pid_to_str. */
649 INHERIT (to_extra_thread_info
, t
);
650 INHERIT (to_thread_name
, t
);
651 INHERIT (to_stop
, t
);
652 /* Do not inherit to_xfer_partial. */
653 /* Do not inherit to_rcmd. */
654 INHERIT (to_pid_to_exec_file
, t
);
655 INHERIT (to_log_command
, t
);
656 INHERIT (to_stratum
, t
);
657 /* Do not inherit to_has_all_memory. */
658 /* Do not inherit to_has_memory. */
659 /* Do not inherit to_has_stack. */
660 /* Do not inherit to_has_registers. */
661 /* Do not inherit to_has_execution. */
662 INHERIT (to_has_thread_control
, t
);
663 /* Do not inherit to_can_async_p. */
664 /* Do not inherit to_is_async_p. */
665 /* Do not inherit to_async. */
666 INHERIT (to_find_memory_regions
, t
);
667 INHERIT (to_make_corefile_notes
, t
);
668 INHERIT (to_get_bookmark
, t
);
669 INHERIT (to_goto_bookmark
, t
);
670 /* Do not inherit to_get_thread_local_address. */
671 INHERIT (to_can_execute_reverse
, t
);
672 INHERIT (to_execution_direction
, t
);
673 INHERIT (to_thread_architecture
, t
);
674 /* Do not inherit to_read_description. */
675 INHERIT (to_get_ada_task_ptid
, t
);
676 /* Do not inherit to_search_memory. */
677 INHERIT (to_supports_multi_process
, t
);
678 INHERIT (to_supports_enable_disable_tracepoint
, t
);
679 INHERIT (to_supports_string_tracing
, t
);
680 INHERIT (to_trace_init
, t
);
681 INHERIT (to_download_tracepoint
, t
);
682 INHERIT (to_can_download_tracepoint
, t
);
683 INHERIT (to_download_trace_state_variable
, t
);
684 INHERIT (to_enable_tracepoint
, t
);
685 INHERIT (to_disable_tracepoint
, t
);
686 INHERIT (to_trace_set_readonly_regions
, t
);
687 INHERIT (to_trace_start
, t
);
688 INHERIT (to_get_trace_status
, t
);
689 INHERIT (to_get_tracepoint_status
, t
);
690 INHERIT (to_trace_stop
, t
);
691 INHERIT (to_trace_find
, t
);
692 INHERIT (to_get_trace_state_variable_value
, t
);
693 INHERIT (to_save_trace_data
, t
);
694 INHERIT (to_upload_tracepoints
, t
);
695 INHERIT (to_upload_trace_state_variables
, t
);
696 INHERIT (to_get_raw_trace_data
, t
);
697 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
698 INHERIT (to_set_disconnected_tracing
, t
);
699 INHERIT (to_set_circular_trace_buffer
, t
);
700 INHERIT (to_set_trace_buffer_size
, t
);
701 INHERIT (to_set_trace_notes
, t
);
702 INHERIT (to_get_tib_address
, t
);
703 INHERIT (to_set_permissions
, t
);
704 INHERIT (to_static_tracepoint_marker_at
, t
);
705 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
706 INHERIT (to_traceframe_info
, t
);
707 INHERIT (to_use_agent
, t
);
708 INHERIT (to_can_use_agent
, t
);
709 INHERIT (to_augmented_libraries_svr4_read
, t
);
710 INHERIT (to_magic
, t
);
711 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
712 INHERIT (to_can_run_breakpoint_commands
, t
);
713 /* Do not inherit to_memory_map. */
714 /* Do not inherit to_flash_erase. */
715 /* Do not inherit to_flash_done. */
719 /* Clean up a target struct so it no longer has any zero pointers in
720 it. Some entries are defaulted to a method that print an error,
721 others are hard-wired to a standard recursive default. */
723 #define de_fault(field, value) \
724 if (!current_target.field) \
725 current_target.field = value
728 (void (*) (char *, int))
731 (void (*) (struct target_ops
*))
733 de_fault (deprecated_xfer_memory
,
734 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
735 struct mem_attrib
*, struct target_ops
*))
737 de_fault (to_insert_hw_breakpoint
,
738 (int (*) (struct target_ops
*, struct gdbarch
*,
739 struct bp_target_info
*))
741 de_fault (to_remove_hw_breakpoint
,
742 (int (*) (struct target_ops
*, struct gdbarch
*,
743 struct bp_target_info
*))
745 de_fault (to_insert_watchpoint
,
746 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
747 struct expression
*))
749 de_fault (to_remove_watchpoint
,
750 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
751 struct expression
*))
753 de_fault (to_watchpoint_addr_within_range
,
754 default_watchpoint_addr_within_range
);
755 de_fault (to_region_ok_for_hw_watchpoint
,
756 default_region_ok_for_hw_watchpoint
);
757 de_fault (to_can_accel_watchpoint_condition
,
758 (int (*) (struct target_ops
*, CORE_ADDR
, int, int,
759 struct expression
*))
761 de_fault (to_terminal_init
,
762 (void (*) (struct target_ops
*))
764 de_fault (to_terminal_inferior
,
765 (void (*) (struct target_ops
*))
767 de_fault (to_terminal_ours_for_output
,
768 (void (*) (struct target_ops
*))
770 de_fault (to_terminal_ours
,
771 (void (*) (struct target_ops
*))
773 de_fault (to_terminal_save_ours
,
774 (void (*) (struct target_ops
*))
776 de_fault (to_terminal_info
,
777 default_terminal_info
);
779 (void (*) (struct target_ops
*, char *, int))
781 de_fault (to_post_startup_inferior
,
782 (void (*) (struct target_ops
*, ptid_t
))
784 de_fault (to_insert_fork_catchpoint
,
785 (int (*) (struct target_ops
*, int))
787 de_fault (to_remove_fork_catchpoint
,
788 (int (*) (struct target_ops
*, int))
790 de_fault (to_insert_vfork_catchpoint
,
791 (int (*) (struct target_ops
*, int))
793 de_fault (to_remove_vfork_catchpoint
,
794 (int (*) (struct target_ops
*, int))
796 de_fault (to_insert_exec_catchpoint
,
797 (int (*) (struct target_ops
*, int))
799 de_fault (to_remove_exec_catchpoint
,
800 (int (*) (struct target_ops
*, int))
802 de_fault (to_set_syscall_catchpoint
,
803 (int (*) (struct target_ops
*, int, int, int, int, int *))
805 de_fault (to_has_exited
,
806 (int (*) (struct target_ops
*, int, int, int *))
808 de_fault (to_can_run
,
809 (int (*) (struct target_ops
*))
811 de_fault (to_extra_thread_info
,
812 (char *(*) (struct target_ops
*, struct thread_info
*))
814 de_fault (to_thread_name
,
815 (char *(*) (struct target_ops
*, struct thread_info
*))
818 (void (*) (struct target_ops
*, ptid_t
))
820 de_fault (to_pid_to_exec_file
,
821 (char *(*) (struct target_ops
*, int))
823 de_fault (to_thread_architecture
,
824 default_thread_architecture
);
825 current_target
.to_read_description
= NULL
;
826 de_fault (to_get_ada_task_ptid
,
827 (ptid_t (*) (struct target_ops
*, long, long))
828 default_get_ada_task_ptid
);
829 de_fault (to_supports_multi_process
,
830 (int (*) (struct target_ops
*))
832 de_fault (to_supports_enable_disable_tracepoint
,
833 (int (*) (struct target_ops
*))
835 de_fault (to_supports_string_tracing
,
836 (int (*) (struct target_ops
*))
838 de_fault (to_trace_init
,
839 (void (*) (struct target_ops
*))
841 de_fault (to_download_tracepoint
,
842 (void (*) (struct target_ops
*, struct bp_location
*))
844 de_fault (to_can_download_tracepoint
,
845 (int (*) (struct target_ops
*))
847 de_fault (to_download_trace_state_variable
,
848 (void (*) (struct target_ops
*, struct trace_state_variable
*))
850 de_fault (to_enable_tracepoint
,
851 (void (*) (struct target_ops
*, struct bp_location
*))
853 de_fault (to_disable_tracepoint
,
854 (void (*) (struct target_ops
*, struct bp_location
*))
856 de_fault (to_trace_set_readonly_regions
,
857 (void (*) (struct target_ops
*))
859 de_fault (to_trace_start
,
860 (void (*) (struct target_ops
*))
862 de_fault (to_get_trace_status
,
863 (int (*) (struct target_ops
*, struct trace_status
*))
865 de_fault (to_get_tracepoint_status
,
866 (void (*) (struct target_ops
*, struct breakpoint
*,
867 struct uploaded_tp
*))
869 de_fault (to_trace_stop
,
870 (void (*) (struct target_ops
*))
872 de_fault (to_trace_find
,
873 (int (*) (struct target_ops
*,
874 enum trace_find_type
, int, CORE_ADDR
, CORE_ADDR
, int *))
876 de_fault (to_get_trace_state_variable_value
,
877 (int (*) (struct target_ops
*, int, LONGEST
*))
879 de_fault (to_save_trace_data
,
880 (int (*) (struct target_ops
*, const char *))
882 de_fault (to_upload_tracepoints
,
883 (int (*) (struct target_ops
*, struct uploaded_tp
**))
885 de_fault (to_upload_trace_state_variables
,
886 (int (*) (struct target_ops
*, struct uploaded_tsv
**))
888 de_fault (to_get_raw_trace_data
,
889 (LONGEST (*) (struct target_ops
*, gdb_byte
*, ULONGEST
, LONGEST
))
891 de_fault (to_get_min_fast_tracepoint_insn_len
,
892 (int (*) (struct target_ops
*))
894 de_fault (to_set_disconnected_tracing
,
895 (void (*) (struct target_ops
*, int))
897 de_fault (to_set_circular_trace_buffer
,
898 (void (*) (struct target_ops
*, int))
900 de_fault (to_set_trace_buffer_size
,
901 (void (*) (struct target_ops
*, LONGEST
))
903 de_fault (to_set_trace_notes
,
904 (int (*) (struct target_ops
*,
905 const char *, const char *, const char *))
907 de_fault (to_get_tib_address
,
908 (int (*) (struct target_ops
*, ptid_t
, CORE_ADDR
*))
910 de_fault (to_set_permissions
,
911 (void (*) (struct target_ops
*))
913 de_fault (to_static_tracepoint_marker_at
,
914 (int (*) (struct target_ops
*,
915 CORE_ADDR
, struct static_tracepoint_marker
*))
917 de_fault (to_static_tracepoint_markers_by_strid
,
918 (VEC(static_tracepoint_marker_p
) * (*) (struct target_ops
*,
921 de_fault (to_traceframe_info
,
922 (struct traceframe_info
* (*) (struct target_ops
*))
924 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
925 (int (*) (struct target_ops
*))
927 de_fault (to_can_run_breakpoint_commands
,
928 (int (*) (struct target_ops
*))
930 de_fault (to_use_agent
,
931 (int (*) (struct target_ops
*, int))
933 de_fault (to_can_use_agent
,
934 (int (*) (struct target_ops
*))
936 de_fault (to_augmented_libraries_svr4_read
,
937 (int (*) (struct target_ops
*))
939 de_fault (to_execution_direction
, default_execution_direction
);
943 /* Finally, position the target-stack beneath the squashed
944 "current_target". That way code looking for a non-inherited
945 target method can quickly and simply find it. */
946 current_target
.beneath
= target_stack
;
949 setup_target_debug ();
952 /* Push a new target type into the stack of the existing target accessors,
953 possibly superseding some of the existing accessors.
955 Rather than allow an empty stack, we always have the dummy target at
956 the bottom stratum, so we can call the function vectors without
960 push_target (struct target_ops
*t
)
962 struct target_ops
**cur
;
964 /* Check magic number. If wrong, it probably means someone changed
965 the struct definition, but not all the places that initialize one. */
966 if (t
->to_magic
!= OPS_MAGIC
)
968 fprintf_unfiltered (gdb_stderr
,
969 "Magic number of %s target struct wrong\n",
971 internal_error (__FILE__
, __LINE__
,
972 _("failed internal consistency check"));
975 /* Find the proper stratum to install this target in. */
976 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
978 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
982 /* If there's already targets at this stratum, remove them. */
983 /* FIXME: cagney/2003-10-15: I think this should be popping all
984 targets to CUR, and not just those at this stratum level. */
985 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
987 /* There's already something at this stratum level. Close it,
988 and un-hook it from the stack. */
989 struct target_ops
*tmp
= (*cur
);
991 (*cur
) = (*cur
)->beneath
;
996 /* We have removed all targets in our stratum, now add the new one. */
1000 update_current_target ();
1003 /* Remove a target_ops vector from the stack, wherever it may be.
1004 Return how many times it was removed (0 or 1). */
1007 unpush_target (struct target_ops
*t
)
1009 struct target_ops
**cur
;
1010 struct target_ops
*tmp
;
1012 if (t
->to_stratum
== dummy_stratum
)
1013 internal_error (__FILE__
, __LINE__
,
1014 _("Attempt to unpush the dummy target"));
1016 /* Look for the specified target. Note that we assume that a target
1017 can only occur once in the target stack. */
1019 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1025 /* If we don't find target_ops, quit. Only open targets should be
1030 /* Unchain the target. */
1032 (*cur
) = (*cur
)->beneath
;
1033 tmp
->beneath
= NULL
;
1035 update_current_target ();
1037 /* Finally close the target. Note we do this after unchaining, so
1038 any target method calls from within the target_close
1039 implementation don't end up in T anymore. */
1046 pop_all_targets_above (enum strata above_stratum
)
1048 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
1050 if (!unpush_target (target_stack
))
1052 fprintf_unfiltered (gdb_stderr
,
1053 "pop_all_targets couldn't find target %s\n",
1054 target_stack
->to_shortname
);
1055 internal_error (__FILE__
, __LINE__
,
1056 _("failed internal consistency check"));
1063 pop_all_targets (void)
1065 pop_all_targets_above (dummy_stratum
);
1068 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1071 target_is_pushed (struct target_ops
*t
)
1073 struct target_ops
**cur
;
1075 /* Check magic number. If wrong, it probably means someone changed
1076 the struct definition, but not all the places that initialize one. */
1077 if (t
->to_magic
!= OPS_MAGIC
)
1079 fprintf_unfiltered (gdb_stderr
,
1080 "Magic number of %s target struct wrong\n",
1082 internal_error (__FILE__
, __LINE__
,
1083 _("failed internal consistency check"));
1086 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1093 /* Using the objfile specified in OBJFILE, find the address for the
1094 current thread's thread-local storage with offset OFFSET. */
1096 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1098 volatile CORE_ADDR addr
= 0;
1099 struct target_ops
*target
;
1101 for (target
= current_target
.beneath
;
1103 target
= target
->beneath
)
1105 if (target
->to_get_thread_local_address
!= NULL
)
1110 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1112 ptid_t ptid
= inferior_ptid
;
1113 volatile struct gdb_exception ex
;
1115 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1119 /* Fetch the load module address for this objfile. */
1120 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1122 /* If it's 0, throw the appropriate exception. */
1124 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1125 _("TLS load module not found"));
1127 addr
= target
->to_get_thread_local_address (target
, ptid
,
1130 /* If an error occurred, print TLS related messages here. Otherwise,
1131 throw the error to some higher catcher. */
1134 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1138 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1139 error (_("Cannot find thread-local variables "
1140 "in this thread library."));
1142 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1143 if (objfile_is_library
)
1144 error (_("Cannot find shared library `%s' in dynamic"
1145 " linker's load module list"), objfile_name (objfile
));
1147 error (_("Cannot find executable file `%s' in dynamic"
1148 " linker's load module list"), objfile_name (objfile
));
1150 case TLS_NOT_ALLOCATED_YET_ERROR
:
1151 if (objfile_is_library
)
1152 error (_("The inferior has not yet allocated storage for"
1153 " thread-local variables in\n"
1154 "the shared library `%s'\n"
1156 objfile_name (objfile
), target_pid_to_str (ptid
));
1158 error (_("The inferior has not yet allocated storage for"
1159 " thread-local variables in\n"
1160 "the executable `%s'\n"
1162 objfile_name (objfile
), target_pid_to_str (ptid
));
1164 case TLS_GENERIC_ERROR
:
1165 if (objfile_is_library
)
1166 error (_("Cannot find thread-local storage for %s, "
1167 "shared library %s:\n%s"),
1168 target_pid_to_str (ptid
),
1169 objfile_name (objfile
), ex
.message
);
1171 error (_("Cannot find thread-local storage for %s, "
1172 "executable file %s:\n%s"),
1173 target_pid_to_str (ptid
),
1174 objfile_name (objfile
), ex
.message
);
1177 throw_exception (ex
);
1182 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1183 TLS is an ABI-specific thing. But we don't do that yet. */
1185 error (_("Cannot find thread-local variables on this target"));
1191 target_xfer_status_to_string (enum target_xfer_status err
)
1193 #define CASE(X) case X: return #X
1196 CASE(TARGET_XFER_E_IO
);
1197 CASE(TARGET_XFER_E_UNAVAILABLE
);
1206 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1208 /* target_read_string -- read a null terminated string, up to LEN bytes,
1209 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1210 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1211 is responsible for freeing it. Return the number of bytes successfully
1215 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1217 int tlen
, offset
, i
;
1221 int buffer_allocated
;
1223 unsigned int nbytes_read
= 0;
1225 gdb_assert (string
);
1227 /* Small for testing. */
1228 buffer_allocated
= 4;
1229 buffer
= xmalloc (buffer_allocated
);
1234 tlen
= MIN (len
, 4 - (memaddr
& 3));
1235 offset
= memaddr
& 3;
1237 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1240 /* The transfer request might have crossed the boundary to an
1241 unallocated region of memory. Retry the transfer, requesting
1245 errcode
= target_read_memory (memaddr
, buf
, 1);
1250 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1254 bytes
= bufptr
- buffer
;
1255 buffer_allocated
*= 2;
1256 buffer
= xrealloc (buffer
, buffer_allocated
);
1257 bufptr
= buffer
+ bytes
;
1260 for (i
= 0; i
< tlen
; i
++)
1262 *bufptr
++ = buf
[i
+ offset
];
1263 if (buf
[i
+ offset
] == '\000')
1265 nbytes_read
+= i
+ 1;
1272 nbytes_read
+= tlen
;
1281 struct target_section_table
*
1282 target_get_section_table (struct target_ops
*target
)
1284 struct target_ops
*t
;
1287 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1289 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1290 if (t
->to_get_section_table
!= NULL
)
1291 return (*t
->to_get_section_table
) (t
);
1296 /* Find a section containing ADDR. */
1298 struct target_section
*
1299 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1301 struct target_section_table
*table
= target_get_section_table (target
);
1302 struct target_section
*secp
;
1307 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1309 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1315 /* Read memory from the live target, even if currently inspecting a
1316 traceframe. The return is the same as that of target_read. */
1318 static enum target_xfer_status
1319 target_read_live_memory (enum target_object object
,
1320 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1321 ULONGEST
*xfered_len
)
1323 enum target_xfer_status ret
;
1324 struct cleanup
*cleanup
;
1326 /* Switch momentarily out of tfind mode so to access live memory.
1327 Note that this must not clear global state, such as the frame
1328 cache, which must still remain valid for the previous traceframe.
1329 We may be _building_ the frame cache at this point. */
1330 cleanup
= make_cleanup_restore_traceframe_number ();
1331 set_traceframe_number (-1);
1333 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1334 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1336 do_cleanups (cleanup
);
1340 /* Using the set of read-only target sections of OPS, read live
1341 read-only memory. Note that the actual reads start from the
1342 top-most target again.
1344 For interface/parameters/return description see target.h,
1347 static enum target_xfer_status
1348 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1349 enum target_object object
,
1350 gdb_byte
*readbuf
, ULONGEST memaddr
,
1351 ULONGEST len
, ULONGEST
*xfered_len
)
1353 struct target_section
*secp
;
1354 struct target_section_table
*table
;
1356 secp
= target_section_by_addr (ops
, memaddr
);
1358 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1359 secp
->the_bfd_section
)
1362 struct target_section
*p
;
1363 ULONGEST memend
= memaddr
+ len
;
1365 table
= target_get_section_table (ops
);
1367 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1369 if (memaddr
>= p
->addr
)
1371 if (memend
<= p
->endaddr
)
1373 /* Entire transfer is within this section. */
1374 return target_read_live_memory (object
, memaddr
,
1375 readbuf
, len
, xfered_len
);
1377 else if (memaddr
>= p
->endaddr
)
1379 /* This section ends before the transfer starts. */
1384 /* This section overlaps the transfer. Just do half. */
1385 len
= p
->endaddr
- memaddr
;
1386 return target_read_live_memory (object
, memaddr
,
1387 readbuf
, len
, xfered_len
);
1393 return TARGET_XFER_EOF
;
1396 /* Read memory from more than one valid target. A core file, for
1397 instance, could have some of memory but delegate other bits to
1398 the target below it. So, we must manually try all targets. */
1400 static enum target_xfer_status
1401 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1402 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1403 ULONGEST
*xfered_len
)
1405 enum target_xfer_status res
;
1409 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1410 readbuf
, writebuf
, memaddr
, len
,
1412 if (res
== TARGET_XFER_OK
)
1415 /* Stop if the target reports that the memory is not available. */
1416 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1419 /* We want to continue past core files to executables, but not
1420 past a running target's memory. */
1421 if (ops
->to_has_all_memory (ops
))
1426 while (ops
!= NULL
);
1431 /* Perform a partial memory transfer.
1432 For docs see target.h, to_xfer_partial. */
1434 static enum target_xfer_status
1435 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1436 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1437 ULONGEST len
, ULONGEST
*xfered_len
)
1439 enum target_xfer_status res
;
1441 struct mem_region
*region
;
1442 struct inferior
*inf
;
1444 /* For accesses to unmapped overlay sections, read directly from
1445 files. Must do this first, as MEMADDR may need adjustment. */
1446 if (readbuf
!= NULL
&& overlay_debugging
)
1448 struct obj_section
*section
= find_pc_overlay (memaddr
);
1450 if (pc_in_unmapped_range (memaddr
, section
))
1452 struct target_section_table
*table
1453 = target_get_section_table (ops
);
1454 const char *section_name
= section
->the_bfd_section
->name
;
1456 memaddr
= overlay_mapped_address (memaddr
, section
);
1457 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1458 memaddr
, len
, xfered_len
,
1460 table
->sections_end
,
1465 /* Try the executable files, if "trust-readonly-sections" is set. */
1466 if (readbuf
!= NULL
&& trust_readonly
)
1468 struct target_section
*secp
;
1469 struct target_section_table
*table
;
1471 secp
= target_section_by_addr (ops
, memaddr
);
1473 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1474 secp
->the_bfd_section
)
1477 table
= target_get_section_table (ops
);
1478 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1479 memaddr
, len
, xfered_len
,
1481 table
->sections_end
,
1486 /* If reading unavailable memory in the context of traceframes, and
1487 this address falls within a read-only section, fallback to
1488 reading from live memory. */
1489 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1491 VEC(mem_range_s
) *available
;
1493 /* If we fail to get the set of available memory, then the
1494 target does not support querying traceframe info, and so we
1495 attempt reading from the traceframe anyway (assuming the
1496 target implements the old QTro packet then). */
1497 if (traceframe_available_memory (&available
, memaddr
, len
))
1499 struct cleanup
*old_chain
;
1501 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1503 if (VEC_empty (mem_range_s
, available
)
1504 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1506 /* Don't read into the traceframe's available
1508 if (!VEC_empty (mem_range_s
, available
))
1510 LONGEST oldlen
= len
;
1512 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1513 gdb_assert (len
<= oldlen
);
1516 do_cleanups (old_chain
);
1518 /* This goes through the topmost target again. */
1519 res
= memory_xfer_live_readonly_partial (ops
, object
,
1522 if (res
== TARGET_XFER_OK
)
1523 return TARGET_XFER_OK
;
1526 /* No use trying further, we know some memory starting
1527 at MEMADDR isn't available. */
1529 return TARGET_XFER_E_UNAVAILABLE
;
1533 /* Don't try to read more than how much is available, in
1534 case the target implements the deprecated QTro packet to
1535 cater for older GDBs (the target's knowledge of read-only
1536 sections may be outdated by now). */
1537 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1539 do_cleanups (old_chain
);
1543 /* Try GDB's internal data cache. */
1544 region
= lookup_mem_region (memaddr
);
1545 /* region->hi == 0 means there's no upper bound. */
1546 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1549 reg_len
= region
->hi
- memaddr
;
1551 switch (region
->attrib
.mode
)
1554 if (writebuf
!= NULL
)
1555 return TARGET_XFER_E_IO
;
1559 if (readbuf
!= NULL
)
1560 return TARGET_XFER_E_IO
;
1564 /* We only support writing to flash during "load" for now. */
1565 if (writebuf
!= NULL
)
1566 error (_("Writing to flash memory forbidden in this context"));
1570 return TARGET_XFER_E_IO
;
1573 if (!ptid_equal (inferior_ptid
, null_ptid
))
1574 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1579 /* The dcache reads whole cache lines; that doesn't play well
1580 with reading from a trace buffer, because reading outside of
1581 the collected memory range fails. */
1582 && get_traceframe_number () == -1
1583 && (region
->attrib
.cache
1584 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1585 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1587 DCACHE
*dcache
= target_dcache_get_or_init ();
1590 if (readbuf
!= NULL
)
1591 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1593 /* FIXME drow/2006-08-09: If we're going to preserve const
1594 correctness dcache_xfer_memory should take readbuf and
1596 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1599 return TARGET_XFER_E_IO
;
1602 *xfered_len
= (ULONGEST
) l
;
1603 return TARGET_XFER_OK
;
1607 /* If none of those methods found the memory we wanted, fall back
1608 to a target partial transfer. Normally a single call to
1609 to_xfer_partial is enough; if it doesn't recognize an object
1610 it will call the to_xfer_partial of the next target down.
1611 But for memory this won't do. Memory is the only target
1612 object which can be read from more than one valid target.
1613 A core file, for instance, could have some of memory but
1614 delegate other bits to the target below it. So, we must
1615 manually try all targets. */
1617 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1620 /* Make sure the cache gets updated no matter what - if we are writing
1621 to the stack. Even if this write is not tagged as such, we still need
1622 to update the cache. */
1624 if (res
== TARGET_XFER_OK
1627 && target_dcache_init_p ()
1628 && !region
->attrib
.cache
1629 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1630 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1632 DCACHE
*dcache
= target_dcache_get ();
1634 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1637 /* If we still haven't got anything, return the last error. We
1642 /* Perform a partial memory transfer. For docs see target.h,
1645 static enum target_xfer_status
1646 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1647 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1648 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1650 enum target_xfer_status res
;
1652 /* Zero length requests are ok and require no work. */
1654 return TARGET_XFER_EOF
;
1656 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1657 breakpoint insns, thus hiding out from higher layers whether
1658 there are software breakpoints inserted in the code stream. */
1659 if (readbuf
!= NULL
)
1661 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1664 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1665 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1670 struct cleanup
*old_chain
;
1672 /* A large write request is likely to be partially satisfied
1673 by memory_xfer_partial_1. We will continually malloc
1674 and free a copy of the entire write request for breakpoint
1675 shadow handling even though we only end up writing a small
1676 subset of it. Cap writes to 4KB to mitigate this. */
1677 len
= min (4096, len
);
1679 buf
= xmalloc (len
);
1680 old_chain
= make_cleanup (xfree
, buf
);
1681 memcpy (buf
, writebuf
, len
);
1683 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1684 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1687 do_cleanups (old_chain
);
1694 restore_show_memory_breakpoints (void *arg
)
1696 show_memory_breakpoints
= (uintptr_t) arg
;
1700 make_show_memory_breakpoints_cleanup (int show
)
1702 int current
= show_memory_breakpoints
;
1704 show_memory_breakpoints
= show
;
1705 return make_cleanup (restore_show_memory_breakpoints
,
1706 (void *) (uintptr_t) current
);
1709 /* For docs see target.h, to_xfer_partial. */
1711 enum target_xfer_status
1712 target_xfer_partial (struct target_ops
*ops
,
1713 enum target_object object
, const char *annex
,
1714 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1715 ULONGEST offset
, ULONGEST len
,
1716 ULONGEST
*xfered_len
)
1718 enum target_xfer_status retval
;
1720 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1722 /* Transfer is done when LEN is zero. */
1724 return TARGET_XFER_EOF
;
1726 if (writebuf
&& !may_write_memory
)
1727 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1728 core_addr_to_string_nz (offset
), plongest (len
));
1732 /* If this is a memory transfer, let the memory-specific code
1733 have a look at it instead. Memory transfers are more
1735 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1736 || object
== TARGET_OBJECT_CODE_MEMORY
)
1737 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1738 writebuf
, offset
, len
, xfered_len
);
1739 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1741 /* Request the normal memory object from other layers. */
1742 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1746 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1747 writebuf
, offset
, len
, xfered_len
);
1751 const unsigned char *myaddr
= NULL
;
1753 fprintf_unfiltered (gdb_stdlog
,
1754 "%s:target_xfer_partial "
1755 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1758 (annex
? annex
: "(null)"),
1759 host_address_to_string (readbuf
),
1760 host_address_to_string (writebuf
),
1761 core_addr_to_string_nz (offset
),
1762 pulongest (len
), retval
,
1763 pulongest (*xfered_len
));
1769 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1773 fputs_unfiltered (", bytes =", gdb_stdlog
);
1774 for (i
= 0; i
< *xfered_len
; i
++)
1776 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1778 if (targetdebug
< 2 && i
> 0)
1780 fprintf_unfiltered (gdb_stdlog
, " ...");
1783 fprintf_unfiltered (gdb_stdlog
, "\n");
1786 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1790 fputc_unfiltered ('\n', gdb_stdlog
);
1793 /* Check implementations of to_xfer_partial update *XFERED_LEN
1794 properly. Do assertion after printing debug messages, so that we
1795 can find more clues on assertion failure from debugging messages. */
1796 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1797 gdb_assert (*xfered_len
> 0);
1802 /* Read LEN bytes of target memory at address MEMADDR, placing the
1803 results in GDB's memory at MYADDR. Returns either 0 for success or
1804 TARGET_XFER_E_IO if any error occurs.
1806 If an error occurs, no guarantee is made about the contents of the data at
1807 MYADDR. In particular, the caller should not depend upon partial reads
1808 filling the buffer with good data. There is no way for the caller to know
1809 how much good data might have been transfered anyway. Callers that can
1810 deal with partial reads should call target_read (which will retry until
1811 it makes no progress, and then return how much was transferred). */
1814 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1816 /* Dispatch to the topmost target, not the flattened current_target.
1817 Memory accesses check target->to_has_(all_)memory, and the
1818 flattened target doesn't inherit those. */
1819 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1820 myaddr
, memaddr
, len
) == len
)
1823 return TARGET_XFER_E_IO
;
1826 /* Like target_read_memory, but specify explicitly that this is a read
1827 from the target's raw memory. That is, this read bypasses the
1828 dcache, breakpoint shadowing, etc. */
1831 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1833 /* See comment in target_read_memory about why the request starts at
1834 current_target.beneath. */
1835 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1836 myaddr
, memaddr
, len
) == len
)
1839 return TARGET_XFER_E_IO
;
1842 /* Like target_read_memory, but specify explicitly that this is a read from
1843 the target's stack. This may trigger different cache behavior. */
1846 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1848 /* See comment in target_read_memory about why the request starts at
1849 current_target.beneath. */
1850 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1851 myaddr
, memaddr
, len
) == len
)
1854 return TARGET_XFER_E_IO
;
1857 /* Like target_read_memory, but specify explicitly that this is a read from
1858 the target's code. This may trigger different cache behavior. */
1861 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1863 /* See comment in target_read_memory about why the request starts at
1864 current_target.beneath. */
1865 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1866 myaddr
, memaddr
, len
) == len
)
1869 return TARGET_XFER_E_IO
;
1872 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1873 Returns either 0 for success or TARGET_XFER_E_IO if any
1874 error occurs. If an error occurs, no guarantee is made about how
1875 much data got written. Callers that can deal with partial writes
1876 should call target_write. */
1879 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1881 /* See comment in target_read_memory about why the request starts at
1882 current_target.beneath. */
1883 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1884 myaddr
, memaddr
, len
) == len
)
1887 return TARGET_XFER_E_IO
;
1890 /* Write LEN bytes from MYADDR to target raw memory at address
1891 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1892 if any error occurs. If an error occurs, no guarantee is made
1893 about how much data got written. Callers that can deal with
1894 partial writes should call target_write. */
1897 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1899 /* See comment in target_read_memory about why the request starts at
1900 current_target.beneath. */
1901 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1902 myaddr
, memaddr
, len
) == len
)
1905 return TARGET_XFER_E_IO
;
1908 /* Fetch the target's memory map. */
1911 target_memory_map (void)
1913 VEC(mem_region_s
) *result
;
1914 struct mem_region
*last_one
, *this_one
;
1916 struct target_ops
*t
;
1919 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1921 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1922 if (t
->to_memory_map
!= NULL
)
1928 result
= t
->to_memory_map (t
);
1932 qsort (VEC_address (mem_region_s
, result
),
1933 VEC_length (mem_region_s
, result
),
1934 sizeof (struct mem_region
), mem_region_cmp
);
1936 /* Check that regions do not overlap. Simultaneously assign
1937 a numbering for the "mem" commands to use to refer to
1940 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1942 this_one
->number
= ix
;
1944 if (last_one
&& last_one
->hi
> this_one
->lo
)
1946 warning (_("Overlapping regions in memory map: ignoring"));
1947 VEC_free (mem_region_s
, result
);
1950 last_one
= this_one
;
1957 target_flash_erase (ULONGEST address
, LONGEST length
)
1959 struct target_ops
*t
;
1961 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1962 if (t
->to_flash_erase
!= NULL
)
1965 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1966 hex_string (address
), phex (length
, 0));
1967 t
->to_flash_erase (t
, address
, length
);
1975 target_flash_done (void)
1977 struct target_ops
*t
;
1979 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1980 if (t
->to_flash_done
!= NULL
)
1983 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1984 t
->to_flash_done (t
);
/* "show trust-readonly-sections" callback: print the current VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2000 /* More generic transfers. */
2002 static enum target_xfer_status
2003 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
2004 const char *annex
, gdb_byte
*readbuf
,
2005 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
2006 ULONGEST
*xfered_len
)
2008 if (object
== TARGET_OBJECT_MEMORY
2009 && ops
->deprecated_xfer_memory
!= NULL
)
2010 /* If available, fall back to the target's
2011 "deprecated_xfer_memory" method. */
2016 if (writebuf
!= NULL
)
2018 void *buffer
= xmalloc (len
);
2019 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
2021 memcpy (buffer
, writebuf
, len
);
2022 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
2023 1/*write*/, NULL
, ops
);
2024 do_cleanups (cleanup
);
2026 if (readbuf
!= NULL
)
2027 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
2028 0/*read*/, NULL
, ops
);
2031 *xfered_len
= (ULONGEST
) xfered
;
2032 return TARGET_XFER_E_IO
;
2034 else if (xfered
== 0 && errno
== 0)
2035 /* "deprecated_xfer_memory" uses 0, cross checked against
2036 ERRNO as one indication of an error. */
2037 return TARGET_XFER_EOF
;
2039 return TARGET_XFER_E_IO
;
2043 gdb_assert (ops
->beneath
!= NULL
);
2044 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
2045 readbuf
, writebuf
, offset
, len
,
2050 /* Target vector read/write partial wrapper functions. */
2052 static enum target_xfer_status
2053 target_read_partial (struct target_ops
*ops
,
2054 enum target_object object
,
2055 const char *annex
, gdb_byte
*buf
,
2056 ULONGEST offset
, ULONGEST len
,
2057 ULONGEST
*xfered_len
)
2059 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
2063 static enum target_xfer_status
2064 target_write_partial (struct target_ops
*ops
,
2065 enum target_object object
,
2066 const char *annex
, const gdb_byte
*buf
,
2067 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
2069 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
2073 /* Wrappers to perform the full transfer. */
2075 /* For docs on target_read see target.h. */
2078 target_read (struct target_ops
*ops
,
2079 enum target_object object
,
2080 const char *annex
, gdb_byte
*buf
,
2081 ULONGEST offset
, LONGEST len
)
2085 while (xfered
< len
)
2087 ULONGEST xfered_len
;
2088 enum target_xfer_status status
;
2090 status
= target_read_partial (ops
, object
, annex
,
2091 (gdb_byte
*) buf
+ xfered
,
2092 offset
+ xfered
, len
- xfered
,
2095 /* Call an observer, notifying them of the xfer progress? */
2096 if (status
== TARGET_XFER_EOF
)
2098 else if (status
== TARGET_XFER_OK
)
2100 xfered
+= xfered_len
;
2110 /* Assuming that the entire [begin, end) range of memory cannot be
2111 read, try to read whatever subrange is possible to read.
2113 The function returns, in RESULT, either zero or one memory block.
2114 If there's a readable subrange at the beginning, it is completely
2115 read and returned. Any further readable subrange will not be read.
2116 Otherwise, if there's a readable subrange at the end, it will be
2117 completely read and returned. Any readable subranges before it
2118 (obviously, not starting at the beginning), will be ignored. In
2119 other cases -- either no readable subrange, or readable subrange(s)
2120 that is neither at the beginning, or end, nothing is returned.
2122 The purpose of this function is to handle a read across a boundary
2123 of accessible memory in a case when memory map is not available.
2124 The above restrictions are fine for this case, but will give
2125 incorrect results if the memory is 'patchy'. However, supporting
2126 'patchy' memory would require trying to read every single byte,
2127 and it seems unacceptable solution. Explicit memory map is
2128 recommended for this case -- and target_read_memory_robust will
2129 take care of reading multiple ranges then. */
2132 read_whatever_is_readable (struct target_ops
*ops
,
2133 ULONGEST begin
, ULONGEST end
,
2134 VEC(memory_read_result_s
) **result
)
2136 gdb_byte
*buf
= xmalloc (end
- begin
);
2137 ULONGEST current_begin
= begin
;
2138 ULONGEST current_end
= end
;
2140 memory_read_result_s r
;
2141 ULONGEST xfered_len
;
2143 /* If we previously failed to read 1 byte, nothing can be done here. */
2144 if (end
- begin
<= 1)
2150 /* Check that either first or the last byte is readable, and give up
2151 if not. This heuristic is meant to permit reading accessible memory
2152 at the boundary of accessible region. */
2153 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2154 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2159 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2160 buf
+ (end
-begin
) - 1, end
- 1, 1,
2161 &xfered_len
) == TARGET_XFER_OK
)
2172 /* Loop invariant is that the [current_begin, current_end) was previously
2173 found to be not readable as a whole.
2175 Note loop condition -- if the range has 1 byte, we can't divide the range
2176 so there's no point trying further. */
2177 while (current_end
- current_begin
> 1)
2179 ULONGEST first_half_begin
, first_half_end
;
2180 ULONGEST second_half_begin
, second_half_end
;
2182 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2186 first_half_begin
= current_begin
;
2187 first_half_end
= middle
;
2188 second_half_begin
= middle
;
2189 second_half_end
= current_end
;
2193 first_half_begin
= middle
;
2194 first_half_end
= current_end
;
2195 second_half_begin
= current_begin
;
2196 second_half_end
= middle
;
2199 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2200 buf
+ (first_half_begin
- begin
),
2202 first_half_end
- first_half_begin
);
2204 if (xfer
== first_half_end
- first_half_begin
)
2206 /* This half reads up fine. So, the error must be in the
2208 current_begin
= second_half_begin
;
2209 current_end
= second_half_end
;
2213 /* This half is not readable. Because we've tried one byte, we
2214 know some part of this half if actually redable. Go to the next
2215 iteration to divide again and try to read.
2217 We don't handle the other half, because this function only tries
2218 to read a single readable subrange. */
2219 current_begin
= first_half_begin
;
2220 current_end
= first_half_end
;
2226 /* The [begin, current_begin) range has been read. */
2228 r
.end
= current_begin
;
2233 /* The [current_end, end) range has been read. */
2234 LONGEST rlen
= end
- current_end
;
2236 r
.data
= xmalloc (rlen
);
2237 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2238 r
.begin
= current_end
;
2242 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2246 free_memory_read_result_vector (void *x
)
2248 VEC(memory_read_result_s
) *v
= x
;
2249 memory_read_result_s
*current
;
2252 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2254 xfree (current
->data
);
2256 VEC_free (memory_read_result_s
, v
);
2259 VEC(memory_read_result_s
) *
2260 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2262 VEC(memory_read_result_s
) *result
= 0;
2265 while (xfered
< len
)
2267 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2270 /* If there is no explicit region, a fake one should be created. */
2271 gdb_assert (region
);
2273 if (region
->hi
== 0)
2274 rlen
= len
- xfered
;
2276 rlen
= region
->hi
- offset
;
2278 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2280 /* Cannot read this region. Note that we can end up here only
2281 if the region is explicitly marked inaccessible, or
2282 'inaccessible-by-default' is in effect. */
2287 LONGEST to_read
= min (len
- xfered
, rlen
);
2288 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2290 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2291 (gdb_byte
*) buffer
,
2292 offset
+ xfered
, to_read
);
2293 /* Call an observer, notifying them of the xfer progress? */
2296 /* Got an error reading full chunk. See if maybe we can read
2299 read_whatever_is_readable (ops
, offset
+ xfered
,
2300 offset
+ xfered
+ to_read
, &result
);
2305 struct memory_read_result r
;
2307 r
.begin
= offset
+ xfered
;
2308 r
.end
= r
.begin
+ xfer
;
2309 VEC_safe_push (memory_read_result_s
, result
, &r
);
2319 /* An alternative to target_write with progress callbacks. */
2322 target_write_with_progress (struct target_ops
*ops
,
2323 enum target_object object
,
2324 const char *annex
, const gdb_byte
*buf
,
2325 ULONGEST offset
, LONGEST len
,
2326 void (*progress
) (ULONGEST
, void *), void *baton
)
2330 /* Give the progress callback a chance to set up. */
2332 (*progress
) (0, baton
);
2334 while (xfered
< len
)
2336 ULONGEST xfered_len
;
2337 enum target_xfer_status status
;
2339 status
= target_write_partial (ops
, object
, annex
,
2340 (gdb_byte
*) buf
+ xfered
,
2341 offset
+ xfered
, len
- xfered
,
2344 if (status
== TARGET_XFER_EOF
)
2346 if (TARGET_XFER_STATUS_ERROR_P (status
))
2349 gdb_assert (status
== TARGET_XFER_OK
);
2351 (*progress
) (xfered_len
, baton
);
2353 xfered
+= xfered_len
;
2359 /* For docs on target_write see target.h. */
2362 target_write (struct target_ops
*ops
,
2363 enum target_object object
,
2364 const char *annex
, const gdb_byte
*buf
,
2365 ULONGEST offset
, LONGEST len
)
2367 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2371 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2372 the size of the transferred data. PADDING additional bytes are
2373 available in *BUF_P. This is a helper function for
2374 target_read_alloc; see the declaration of that function for more
2378 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2379 const char *annex
, gdb_byte
**buf_p
, int padding
)
2381 size_t buf_alloc
, buf_pos
;
2384 /* This function does not have a length parameter; it reads the
2385 entire OBJECT). Also, it doesn't support objects fetched partly
2386 from one target and partly from another (in a different stratum,
2387 e.g. a core file and an executable). Both reasons make it
2388 unsuitable for reading memory. */
2389 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2391 /* Start by reading up to 4K at a time. The target will throttle
2392 this number down if necessary. */
2394 buf
= xmalloc (buf_alloc
);
2398 ULONGEST xfered_len
;
2399 enum target_xfer_status status
;
2401 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2402 buf_pos
, buf_alloc
- buf_pos
- padding
,
2405 if (status
== TARGET_XFER_EOF
)
2407 /* Read all there was. */
2414 else if (status
!= TARGET_XFER_OK
)
2416 /* An error occurred. */
2418 return TARGET_XFER_E_IO
;
2421 buf_pos
+= xfered_len
;
2423 /* If the buffer is filling up, expand it. */
2424 if (buf_alloc
< buf_pos
* 2)
2427 buf
= xrealloc (buf
, buf_alloc
);
2434 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2435 the size of the transferred data. See the declaration in "target.h"
2436 function for more information about the return value. */
2439 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2440 const char *annex
, gdb_byte
**buf_p
)
2442 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2445 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2446 returned as a string, allocated using xmalloc. If an error occurs
2447 or the transfer is unsupported, NULL is returned. Empty objects
2448 are returned as allocated but empty strings. A warning is issued
2449 if the result contains any embedded NUL bytes. */
2452 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2457 LONGEST i
, transferred
;
2459 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2460 bufstr
= (char *) buffer
;
2462 if (transferred
< 0)
2465 if (transferred
== 0)
2466 return xstrdup ("");
2468 bufstr
[transferred
] = 0;
2470 /* Check for embedded NUL bytes; but allow trailing NULs. */
2471 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2474 warning (_("target object %d, annex %s, "
2475 "contained unexpected null characters"),
2476 (int) object
, annex
? annex
: "(none)");
2483 /* Memory transfer methods. */
2486 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2489 /* This method is used to read from an alternate, non-current
2490 target. This read must bypass the overlay support (as symbols
2491 don't match this target), and GDB's internal cache (wrong cache
2492 for this target). */
2493 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2495 memory_error (TARGET_XFER_E_IO
, addr
);
2499 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2500 int len
, enum bfd_endian byte_order
)
2502 gdb_byte buf
[sizeof (ULONGEST
)];
2504 gdb_assert (len
<= sizeof (buf
));
2505 get_target_memory (ops
, addr
, buf
, len
);
2506 return extract_unsigned_integer (buf
, len
, byte_order
);
2512 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2513 struct bp_target_info
*bp_tgt
)
2515 if (!may_insert_breakpoints
)
2517 warning (_("May not insert breakpoints"));
2521 return current_target
.to_insert_breakpoint (¤t_target
,
2528 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2529 struct bp_target_info
*bp_tgt
)
2531 /* This is kind of a weird case to handle, but the permission might
2532 have been changed after breakpoints were inserted - in which case
2533 we should just take the user literally and assume that any
2534 breakpoints should be left in place. */
2535 if (!may_insert_breakpoints
)
2537 warning (_("May not remove breakpoints"));
2541 return current_target
.to_remove_breakpoint (¤t_target
,
2546 target_info (char *args
, int from_tty
)
2548 struct target_ops
*t
;
2549 int has_all_mem
= 0;
2551 if (symfile_objfile
!= NULL
)
2552 printf_unfiltered (_("Symbols from \"%s\".\n"),
2553 objfile_name (symfile_objfile
));
2555 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2557 if (!(*t
->to_has_memory
) (t
))
2560 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2563 printf_unfiltered (_("\tWhile running this, "
2564 "GDB does not access memory from...\n"));
2565 printf_unfiltered ("%s:\n", t
->to_longname
);
2566 (t
->to_files_info
) (t
);
2567 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2571 /* This function is called before any new inferior is created, e.g.
2572 by running a program, attaching, or connecting to a target.
2573 It cleans up any state from previous invocations which might
2574 change between runs. This is a subset of what target_preopen
2575 resets (things which might change between targets). */
2578 target_pre_inferior (int from_tty
)
2580 /* Clear out solib state. Otherwise the solib state of the previous
2581 inferior might have survived and is entirely wrong for the new
2582 target. This has been observed on GNU/Linux using glibc 2.3. How
2594 Cannot access memory at address 0xdeadbeef
2597 /* In some OSs, the shared library list is the same/global/shared
2598 across inferiors. If code is shared between processes, so are
2599 memory regions and features. */
2600 if (!gdbarch_has_global_solist (target_gdbarch ()))
2602 no_shared_libraries (NULL
, from_tty
);
2604 invalidate_target_mem_regions ();
2606 target_clear_description ();
2609 agent_capability_invalidate ();
2612 /* Callback for iterate_over_inferiors. Gets rid of the given
2616 dispose_inferior (struct inferior
*inf
, void *args
)
2618 struct thread_info
*thread
;
2620 thread
= any_thread_of_process (inf
->pid
);
2623 switch_to_thread (thread
->ptid
);
2625 /* Core inferiors actually should be detached, not killed. */
2626 if (target_has_execution
)
2629 target_detach (NULL
, 0);
2635 /* This is to be called by the open routine before it does
2639 target_preopen (int from_tty
)
2643 if (have_inferiors ())
2646 || !have_live_inferiors ()
2647 || query (_("A program is being debugged already. Kill it? ")))
2648 iterate_over_inferiors (dispose_inferior
, NULL
);
2650 error (_("Program not killed."));
2653 /* Calling target_kill may remove the target from the stack. But if
2654 it doesn't (which seems like a win for UDI), remove it now. */
2655 /* Leave the exec target, though. The user may be switching from a
2656 live process to a core of the same program. */
2657 pop_all_targets_above (file_stratum
);
2659 target_pre_inferior (from_tty
);
2662 /* Detach a target after doing deferred register stores. */
2665 target_detach (const char *args
, int from_tty
)
2667 struct target_ops
* t
;
2669 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2670 /* Don't remove global breakpoints here. They're removed on
2671 disconnection from the target. */
2674 /* If we're in breakpoints-always-inserted mode, have to remove
2675 them before detaching. */
2676 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2678 prepare_for_detach ();
2680 current_target
.to_detach (¤t_target
, args
, from_tty
);
2682 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2687 target_disconnect (char *args
, int from_tty
)
2689 struct target_ops
*t
;
2691 /* If we're in breakpoints-always-inserted mode or if breakpoints
2692 are global across processes, we have to remove them before
2694 remove_breakpoints ();
2696 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2697 if (t
->to_disconnect
!= NULL
)
2700 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2702 t
->to_disconnect (t
, args
, from_tty
);
2710 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2712 struct target_ops
*t
;
2713 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2718 char *status_string
;
2719 char *options_string
;
2721 status_string
= target_waitstatus_to_string (status
);
2722 options_string
= target_options_to_string (options
);
2723 fprintf_unfiltered (gdb_stdlog
,
2724 "target_wait (%d, status, options={%s})"
2726 ptid_get_pid (ptid
), options_string
,
2727 ptid_get_pid (retval
), status_string
);
2728 xfree (status_string
);
2729 xfree (options_string
);
2736 target_pid_to_str (ptid_t ptid
)
2738 struct target_ops
*t
;
2740 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2742 if (t
->to_pid_to_str
!= NULL
)
2743 return (*t
->to_pid_to_str
) (t
, ptid
);
2746 return normal_pid_to_str (ptid
);
2750 target_thread_name (struct thread_info
*info
)
2752 struct target_ops
*t
;
2754 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2756 if (t
->to_thread_name
!= NULL
)
2757 return (*t
->to_thread_name
) (t
, info
);
2764 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2766 struct target_ops
*t
;
2768 target_dcache_invalidate ();
2770 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2772 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2773 ptid_get_pid (ptid
),
2774 step
? "step" : "continue",
2775 gdb_signal_to_name (signal
));
2777 registers_changed_ptid (ptid
);
2778 set_executing (ptid
, 1);
2779 set_running (ptid
, 1);
2780 clear_inline_frame_state (ptid
);
2784 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2786 struct target_ops
*t
;
2788 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2790 if (t
->to_pass_signals
!= NULL
)
2796 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2799 for (i
= 0; i
< numsigs
; i
++)
2800 if (pass_signals
[i
])
2801 fprintf_unfiltered (gdb_stdlog
, " %s",
2802 gdb_signal_to_name (i
));
2804 fprintf_unfiltered (gdb_stdlog
, " })\n");
2807 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2814 target_program_signals (int numsigs
, unsigned char *program_signals
)
2816 struct target_ops
*t
;
2818 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2820 if (t
->to_program_signals
!= NULL
)
2826 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2829 for (i
= 0; i
< numsigs
; i
++)
2830 if (program_signals
[i
])
2831 fprintf_unfiltered (gdb_stdlog
, " %s",
2832 gdb_signal_to_name (i
));
2834 fprintf_unfiltered (gdb_stdlog
, " })\n");
2837 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2843 /* Look through the list of possible targets for a target that can
2847 target_follow_fork (int follow_child
, int detach_fork
)
2849 struct target_ops
*t
;
2851 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2853 if (t
->to_follow_fork
!= NULL
)
2855 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2858 fprintf_unfiltered (gdb_stdlog
,
2859 "target_follow_fork (%d, %d) = %d\n",
2860 follow_child
, detach_fork
, retval
);
2865 /* Some target returned a fork event, but did not know how to follow it. */
2866 internal_error (__FILE__
, __LINE__
,
2867 _("could not find a target to follow fork"));
2871 target_mourn_inferior (void)
2873 struct target_ops
*t
;
2875 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2877 if (t
->to_mourn_inferior
!= NULL
)
2879 t
->to_mourn_inferior (t
);
2881 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2883 /* We no longer need to keep handles on any of the object files.
2884 Make sure to release them to avoid unnecessarily locking any
2885 of them while we're not actually debugging. */
2886 bfd_cache_close_all ();
2892 internal_error (__FILE__
, __LINE__
,
2893 _("could not find a target to follow mourn inferior"));
2896 /* Look for a target which can describe architectural features, starting
2897 from TARGET. If we find one, return its description. */
2899 const struct target_desc
*
2900 target_read_description (struct target_ops
*target
)
2902 struct target_ops
*t
;
2904 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2905 if (t
->to_read_description
!= NULL
)
2907 const struct target_desc
*tdesc
;
2909 tdesc
= t
->to_read_description (t
);
2917 /* The default implementation of to_search_memory.
2918 This implements a basic search of memory, reading target memory and
2919 performing the search here (as opposed to performing the search in on the
2920 target side with, for example, gdbserver). */
2923 simple_search_memory (struct target_ops
*ops
,
2924 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2925 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2926 CORE_ADDR
*found_addrp
)
2928 /* NOTE: also defined in find.c testcase. */
2929 #define SEARCH_CHUNK_SIZE 16000
2930 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2931 /* Buffer to hold memory contents for searching. */
2932 gdb_byte
*search_buf
;
2933 unsigned search_buf_size
;
2934 struct cleanup
*old_cleanups
;
2936 search_buf_size
= chunk_size
+ pattern_len
- 1;
2938 /* No point in trying to allocate a buffer larger than the search space. */
2939 if (search_space_len
< search_buf_size
)
2940 search_buf_size
= search_space_len
;
2942 search_buf
= malloc (search_buf_size
);
2943 if (search_buf
== NULL
)
2944 error (_("Unable to allocate memory to perform the search."));
2945 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2947 /* Prime the search buffer. */
2949 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2950 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2952 warning (_("Unable to access %s bytes of target "
2953 "memory at %s, halting search."),
2954 pulongest (search_buf_size
), hex_string (start_addr
));
2955 do_cleanups (old_cleanups
);
2959 /* Perform the search.
2961 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2962 When we've scanned N bytes we copy the trailing bytes to the start and
2963 read in another N bytes. */
2965 while (search_space_len
>= pattern_len
)
2967 gdb_byte
*found_ptr
;
2968 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2970 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2971 pattern
, pattern_len
);
2973 if (found_ptr
!= NULL
)
2975 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2977 *found_addrp
= found_addr
;
2978 do_cleanups (old_cleanups
);
2982 /* Not found in this chunk, skip to next chunk. */
2984 /* Don't let search_space_len wrap here, it's unsigned. */
2985 if (search_space_len
>= chunk_size
)
2986 search_space_len
-= chunk_size
;
2988 search_space_len
= 0;
2990 if (search_space_len
>= pattern_len
)
2992 unsigned keep_len
= search_buf_size
- chunk_size
;
2993 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2996 /* Copy the trailing part of the previous iteration to the front
2997 of the buffer for the next iteration. */
2998 gdb_assert (keep_len
== pattern_len
- 1);
2999 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
3001 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
3003 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
3004 search_buf
+ keep_len
, read_addr
,
3005 nr_to_read
) != nr_to_read
)
3007 warning (_("Unable to access %s bytes of target "
3008 "memory at %s, halting search."),
3009 plongest (nr_to_read
),
3010 hex_string (read_addr
));
3011 do_cleanups (old_cleanups
);
3015 start_addr
+= chunk_size
;
3021 do_cleanups (old_cleanups
);
3025 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3026 sequence of bytes in PATTERN with length PATTERN_LEN.
3028 The result is 1 if found, 0 if not found, and -1 if there was an error
3029 requiring halting of the search (e.g. memory read error).
3030 If the pattern is found the address is recorded in FOUND_ADDRP. */
3033 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
3034 const gdb_byte
*pattern
, ULONGEST pattern_len
,
3035 CORE_ADDR
*found_addrp
)
3037 struct target_ops
*t
;
3040 /* We don't use INHERIT to set current_target.to_search_memory,
3041 so we have to scan the target stack and handle targetdebug
3045 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
3046 hex_string (start_addr
));
3048 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3049 if (t
->to_search_memory
!= NULL
)
3054 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
3055 pattern
, pattern_len
, found_addrp
);
3059 /* If a special version of to_search_memory isn't available, use the
3061 found
= simple_search_memory (current_target
.beneath
,
3062 start_addr
, search_space_len
,
3063 pattern
, pattern_len
, found_addrp
);
3067 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
3072 /* Look through the currently pushed targets. If none of them will
3073 be able to restart the currently running process, issue an error
3077 target_require_runnable (void)
3079 struct target_ops
*t
;
3081 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
3083 /* If this target knows how to create a new program, then
3084 assume we will still be able to after killing the current
3085 one. Either killing and mourning will not pop T, or else
3086 find_default_run_target will find it again. */
3087 if (t
->to_create_inferior
!= NULL
)
3090 /* Do not worry about thread_stratum targets that can not
3091 create inferiors. Assume they will be pushed again if
3092 necessary, and continue to the process_stratum. */
3093 if (t
->to_stratum
== thread_stratum
3094 || t
->to_stratum
== arch_stratum
)
3097 error (_("The \"%s\" target does not support \"run\". "
3098 "Try \"help target\" or \"continue\"."),
3102 /* This function is only called if the target is running. In that
3103 case there should have been a process_stratum target and it
3104 should either know how to create inferiors, or not... */
3105 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3108 /* Look through the list of possible targets for a target that can
3109 execute a run or attach command without any other data. This is
3110 used to locate the default process stratum.
3112 If DO_MESG is not NULL, the result is always valid (error() is
3113 called for errors); else, return NULL on error. */
3115 static struct target_ops
*
3116 find_default_run_target (char *do_mesg
)
3118 struct target_ops
**t
;
3119 struct target_ops
*runable
= NULL
;
3124 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3127 if ((*t
)->to_can_run
&& target_can_run (*t
))
3137 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3146 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3148 struct target_ops
*t
;
3150 t
= find_default_run_target ("attach");
3151 (t
->to_attach
) (t
, args
, from_tty
);
3156 find_default_create_inferior (struct target_ops
*ops
,
3157 char *exec_file
, char *allargs
, char **env
,
3160 struct target_ops
*t
;
3162 t
= find_default_run_target ("run");
3163 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3168 find_default_can_async_p (struct target_ops
*ignore
)
3170 struct target_ops
*t
;
3172 /* This may be called before the target is pushed on the stack;
3173 look for the default process stratum. If there's none, gdb isn't
3174 configured with a native debugger, and target remote isn't
3176 t
= find_default_run_target (NULL
);
3177 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3178 return (t
->to_can_async_p
) (t
);
3183 find_default_is_async_p (struct target_ops
*ignore
)
3185 struct target_ops
*t
;
3187 /* This may be called before the target is pushed on the stack;
3188 look for the default process stratum. If there's none, gdb isn't
3189 configured with a native debugger, and target remote isn't
3191 t
= find_default_run_target (NULL
);
3192 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3193 return (t
->to_is_async_p
) (t
);
3198 find_default_supports_non_stop (struct target_ops
*self
)
3200 struct target_ops
*t
;
3202 t
= find_default_run_target (NULL
);
3203 if (t
&& t
->to_supports_non_stop
)
3204 return (t
->to_supports_non_stop
) (t
);
3209 target_supports_non_stop (void)
3211 struct target_ops
*t
;
3213 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3214 if (t
->to_supports_non_stop
)
3215 return t
->to_supports_non_stop (t
);
3220 /* Implement the "info proc" command. */
3223 target_info_proc (char *args
, enum info_proc_what what
)
3225 struct target_ops
*t
;
3227 /* If we're already connected to something that can get us OS
3228 related data, use it. Otherwise, try using the native
3230 if (current_target
.to_stratum
>= process_stratum
)
3231 t
= current_target
.beneath
;
3233 t
= find_default_run_target (NULL
);
3235 for (; t
!= NULL
; t
= t
->beneath
)
3237 if (t
->to_info_proc
!= NULL
)
3239 t
->to_info_proc (t
, args
, what
);
3242 fprintf_unfiltered (gdb_stdlog
,
3243 "target_info_proc (\"%s\", %d)\n", args
, what
);
3253 find_default_supports_disable_randomization (struct target_ops
*self
)
3255 struct target_ops
*t
;
3257 t
= find_default_run_target (NULL
);
3258 if (t
&& t
->to_supports_disable_randomization
)
3259 return (t
->to_supports_disable_randomization
) (t
);
3264 target_supports_disable_randomization (void)
3266 struct target_ops
*t
;
3268 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3269 if (t
->to_supports_disable_randomization
)
3270 return t
->to_supports_disable_randomization (t
);
3276 target_get_osdata (const char *type
)
3278 struct target_ops
*t
;
3280 /* If we're already connected to something that can get us OS
3281 related data, use it. Otherwise, try using the native
3283 if (current_target
.to_stratum
>= process_stratum
)
3284 t
= current_target
.beneath
;
3286 t
= find_default_run_target ("get OS data");
3291 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3294 /* Determine the current address space of thread PTID. */
3296 struct address_space
*
3297 target_thread_address_space (ptid_t ptid
)
3299 struct address_space
*aspace
;
3300 struct inferior
*inf
;
3301 struct target_ops
*t
;
3303 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3305 if (t
->to_thread_address_space
!= NULL
)
3307 aspace
= t
->to_thread_address_space (t
, ptid
);
3308 gdb_assert (aspace
);
3311 fprintf_unfiltered (gdb_stdlog
,
3312 "target_thread_address_space (%s) = %d\n",
3313 target_pid_to_str (ptid
),
3314 address_space_num (aspace
));
3319 /* Fall-back to the "main" address space of the inferior. */
3320 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3322 if (inf
== NULL
|| inf
->aspace
== NULL
)
3323 internal_error (__FILE__
, __LINE__
,
3324 _("Can't determine the current "
3325 "address space of thread %s\n"),
3326 target_pid_to_str (ptid
));
3332 /* Target file operations. */
3334 static struct target_ops
*
3335 default_fileio_target (void)
3337 /* If we're already connected to something that can perform
3338 file I/O, use it. Otherwise, try using the native target. */
3339 if (current_target
.to_stratum
>= process_stratum
)
3340 return current_target
.beneath
;
3342 return find_default_run_target ("file I/O");
3345 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3346 target file descriptor, or -1 if an error occurs (and set
3349 target_fileio_open (const char *filename
, int flags
, int mode
,
3352 struct target_ops
*t
;
3354 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3356 if (t
->to_fileio_open
!= NULL
)
3358 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3361 fprintf_unfiltered (gdb_stdlog
,
3362 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3363 filename
, flags
, mode
,
3364 fd
, fd
!= -1 ? 0 : *target_errno
);
3369 *target_errno
= FILEIO_ENOSYS
;
3373 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3374 Return the number of bytes written, or -1 if an error occurs
3375 (and set *TARGET_ERRNO). */
3377 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3378 ULONGEST offset
, int *target_errno
)
3380 struct target_ops
*t
;
3382 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3384 if (t
->to_fileio_pwrite
!= NULL
)
3386 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3390 fprintf_unfiltered (gdb_stdlog
,
3391 "target_fileio_pwrite (%d,...,%d,%s) "
3393 fd
, len
, pulongest (offset
),
3394 ret
, ret
!= -1 ? 0 : *target_errno
);
3399 *target_errno
= FILEIO_ENOSYS
;
3403 /* Read up to LEN bytes FD on the target into READ_BUF.
3404 Return the number of bytes read, or -1 if an error occurs
3405 (and set *TARGET_ERRNO). */
3407 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3408 ULONGEST offset
, int *target_errno
)
3410 struct target_ops
*t
;
3412 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3414 if (t
->to_fileio_pread
!= NULL
)
3416 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3420 fprintf_unfiltered (gdb_stdlog
,
3421 "target_fileio_pread (%d,...,%d,%s) "
3423 fd
, len
, pulongest (offset
),
3424 ret
, ret
!= -1 ? 0 : *target_errno
);
3429 *target_errno
= FILEIO_ENOSYS
;
3433 /* Close FD on the target. Return 0, or -1 if an error occurs
3434 (and set *TARGET_ERRNO). */
3436 target_fileio_close (int fd
, int *target_errno
)
3438 struct target_ops
*t
;
3440 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3442 if (t
->to_fileio_close
!= NULL
)
3444 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3447 fprintf_unfiltered (gdb_stdlog
,
3448 "target_fileio_close (%d) = %d (%d)\n",
3449 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3454 *target_errno
= FILEIO_ENOSYS
;
3458 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3459 occurs (and set *TARGET_ERRNO). */
3461 target_fileio_unlink (const char *filename
, int *target_errno
)
3463 struct target_ops
*t
;
3465 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3467 if (t
->to_fileio_unlink
!= NULL
)
3469 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3472 fprintf_unfiltered (gdb_stdlog
,
3473 "target_fileio_unlink (%s) = %d (%d)\n",
3474 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3479 *target_errno
= FILEIO_ENOSYS
;
3483 /* Read value of symbolic link FILENAME on the target. Return a
3484 null-terminated string allocated via xmalloc, or NULL if an error
3485 occurs (and set *TARGET_ERRNO). */
3487 target_fileio_readlink (const char *filename
, int *target_errno
)
3489 struct target_ops
*t
;
3491 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3493 if (t
->to_fileio_readlink
!= NULL
)
3495 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3498 fprintf_unfiltered (gdb_stdlog
,
3499 "target_fileio_readlink (%s) = %s (%d)\n",
3500 filename
, ret
? ret
: "(nil)",
3501 ret
? 0 : *target_errno
);
3506 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback: close the target fd pointed to by OPAQUE,
   discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3519 /* Read target file FILENAME. Store the result in *BUF_P and
3520 return the size of the transferred data. PADDING additional bytes are
3521 available in *BUF_P. This is a helper function for
3522 target_fileio_read_alloc; see the declaration of that function for more
3526 target_fileio_read_alloc_1 (const char *filename
,
3527 gdb_byte
**buf_p
, int padding
)
3529 struct cleanup
*close_cleanup
;
3530 size_t buf_alloc
, buf_pos
;
3536 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3540 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3542 /* Start by reading up to 4K at a time. The target will throttle
3543 this number down if necessary. */
3545 buf
= xmalloc (buf_alloc
);
3549 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3550 buf_alloc
- buf_pos
- padding
, buf_pos
,
3554 /* An error occurred. */
3555 do_cleanups (close_cleanup
);
3561 /* Read all there was. */
3562 do_cleanups (close_cleanup
);
3572 /* If the buffer is filling up, expand it. */
3573 if (buf_alloc
< buf_pos
* 2)
3576 buf
= xrealloc (buf
, buf_alloc
);
3583 /* Read target file FILENAME. Store the result in *BUF_P and return
3584 the size of the transferred data. See the declaration in "target.h"
3585 function for more information about the return value. */
3588 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3590 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3593 /* Read target file FILENAME. The result is NUL-terminated and
3594 returned as a string, allocated using xmalloc. If an error occurs
3595 or the transfer is unsupported, NULL is returned. Empty objects
3596 are returned as allocated but empty strings. A warning is issued
3597 if the result contains any embedded NUL bytes. */
3600 target_fileio_read_stralloc (const char *filename
)
3604 LONGEST i
, transferred
;
3606 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3607 bufstr
= (char *) buffer
;
3609 if (transferred
< 0)
3612 if (transferred
== 0)
3613 return xstrdup ("");
3615 bufstr
[transferred
] = 0;
3617 /* Check for embedded NUL bytes; but allow trailing NULs. */
3618 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3621 warning (_("target file %s "
3622 "contained unexpected null characters"),
3632 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3633 CORE_ADDR addr
, int len
)
3635 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3639 default_watchpoint_addr_within_range (struct target_ops
*target
,
3641 CORE_ADDR start
, int length
)
3643 return addr
>= start
&& addr
< start
+ length
;
3646 static struct gdbarch
*
3647 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3649 return target_gdbarch ();
/* Trivial default method: always return -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3677 * Find the next target down the stack from the specified target.
3681 find_target_beneath (struct target_ops
*t
)
3689 find_target_at (enum strata stratum
)
3691 struct target_ops
*t
;
3693 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3694 if (t
->to_stratum
== stratum
)
3701 /* The inferior process has died. Long live the inferior! */
3704 generic_mourn_inferior (void)
3708 ptid
= inferior_ptid
;
3709 inferior_ptid
= null_ptid
;
3711 /* Mark breakpoints uninserted in case something tries to delete a
3712 breakpoint while we delete the inferior's threads (which would
3713 fail, since the inferior is long gone). */
3714 mark_breakpoints_out ();
3716 if (!ptid_equal (ptid
, null_ptid
))
3718 int pid
= ptid_get_pid (ptid
);
3719 exit_inferior (pid
);
3722 /* Note this wipes step-resume breakpoints, so needs to be done
3723 after exit_inferior, which ends up referencing the step-resume
3724 breakpoints through clear_thread_inferior_resources. */
3725 breakpoint_init_inferior (inf_exited
);
3727 registers_changed ();
3729 reopen_exec_file ();
3730 reinit_frame_cache ();
3732 if (deprecated_detach_hook
)
3733 deprecated_detach_hook ();
3736 /* Convert a normal process ID to a string. Returns the string in a
3740 normal_pid_to_str (ptid_t ptid
)
3742 static char buf
[32];
3744 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3749 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3751 return normal_pid_to_str (ptid
);
3754 /* Error-catcher for target_find_memory_regions. */
3756 dummy_find_memory_regions (struct target_ops
*self
,
3757 find_memory_region_ftype ignore1
, void *ignore2
)
3759 error (_("Command not implemented for this target."));
3763 /* Error-catcher for target_make_corefile_notes. */
3765 dummy_make_corefile_notes (struct target_ops
*self
,
3766 bfd
*ignore1
, int *ignore2
)
3768 error (_("Command not implemented for this target."));
3772 /* Error-catcher for target_get_bookmark. */
3774 dummy_get_bookmark (struct target_ops
*self
, char *ignore1
, int ignore2
)
3780 /* Error-catcher for target_goto_bookmark. */
3782 dummy_goto_bookmark (struct target_ops
*self
, gdb_byte
*ignore
, int from_tty
)
3787 /* Set up the handful of non-empty slots needed by the dummy target
3791 init_dummy_target (void)
3793 dummy_target
.to_shortname
= "None";
3794 dummy_target
.to_longname
= "None";
3795 dummy_target
.to_doc
= "";
3796 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3797 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3798 dummy_target
.to_supports_disable_randomization
3799 = find_default_supports_disable_randomization
;
3800 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3801 dummy_target
.to_stratum
= dummy_stratum
;
3802 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3803 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3804 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3805 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3806 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3807 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3808 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3809 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3810 dummy_target
.to_has_execution
3811 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3812 dummy_target
.to_magic
= OPS_MAGIC
;
3814 install_dummy_methods (&dummy_target
);
3818 debug_to_open (char *args
, int from_tty
)
3820 debug_target
.to_open (args
, from_tty
);
3822 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3826 target_close (struct target_ops
*targ
)
3828 gdb_assert (!target_is_pushed (targ
));
3830 if (targ
->to_xclose
!= NULL
)
3831 targ
->to_xclose (targ
);
3832 else if (targ
->to_close
!= NULL
)
3833 targ
->to_close (targ
);
3836 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3840 target_attach (char *args
, int from_tty
)
3842 current_target
.to_attach (¤t_target
, args
, from_tty
);
3844 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3849 target_thread_alive (ptid_t ptid
)
3851 struct target_ops
*t
;
3853 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3855 if (t
->to_thread_alive
!= NULL
)
3859 retval
= t
->to_thread_alive (t
, ptid
);
3861 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3862 ptid_get_pid (ptid
), retval
);
3872 target_find_new_threads (void)
3874 struct target_ops
*t
;
3876 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3878 if (t
->to_find_new_threads
!= NULL
)
3880 t
->to_find_new_threads (t
);
3882 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3890 target_stop (ptid_t ptid
)
3894 warning (_("May not interrupt or stop the target, ignoring attempt"));
3898 (*current_target
.to_stop
) (¤t_target
, ptid
);
3902 debug_to_post_attach (struct target_ops
*self
, int pid
)
3904 debug_target
.to_post_attach (&debug_target
, pid
);
3906 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3909 /* Concatenate ELEM to LIST, a comma separate list, and return the
3910 result. The LIST incoming argument is released. */
3913 str_comma_list_concat_elem (char *list
, const char *elem
)
3916 return xstrdup (elem
);
3918 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3940 target_options_to_string (int target_options
)
3944 #define DO_TARG_OPTION(OPT) \
3945 ret = do_option (&target_options, ret, OPT, #OPT)
3947 DO_TARG_OPTION (TARGET_WNOHANG
);
3949 if (target_options
!= 0)
3950 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3958 debug_print_register (const char * func
,
3959 struct regcache
*regcache
, int regno
)
3961 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3963 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3964 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3965 && gdbarch_register_name (gdbarch
, regno
) != NULL
3966 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3967 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3968 gdbarch_register_name (gdbarch
, regno
));
3970 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3971 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3973 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3974 int i
, size
= register_size (gdbarch
, regno
);
3975 gdb_byte buf
[MAX_REGISTER_SIZE
];
3977 regcache_raw_collect (regcache
, regno
, buf
);
3978 fprintf_unfiltered (gdb_stdlog
, " = ");
3979 for (i
= 0; i
< size
; i
++)
3981 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3983 if (size
<= sizeof (LONGEST
))
3985 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3987 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3988 core_addr_to_string_nz (val
), plongest (val
));
3991 fprintf_unfiltered (gdb_stdlog
, "\n");
3995 target_fetch_registers (struct regcache
*regcache
, int regno
)
3997 struct target_ops
*t
;
3999 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4001 if (t
->to_fetch_registers
!= NULL
)
4003 t
->to_fetch_registers (t
, regcache
, regno
);
4005 debug_print_register ("target_fetch_registers", regcache
, regno
);
4012 target_store_registers (struct regcache
*regcache
, int regno
)
4014 struct target_ops
*t
;
4016 if (!may_write_registers
)
4017 error (_("Writing to registers is not allowed (regno %d)"), regno
);
4019 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
4022 debug_print_register ("target_store_registers", regcache
, regno
);
4027 target_core_of_thread (ptid_t ptid
)
4029 struct target_ops
*t
;
4031 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4033 if (t
->to_core_of_thread
!= NULL
)
4035 int retval
= t
->to_core_of_thread (t
, ptid
);
4038 fprintf_unfiltered (gdb_stdlog
,
4039 "target_core_of_thread (%d) = %d\n",
4040 ptid_get_pid (ptid
), retval
);
4049 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
4051 struct target_ops
*t
;
4053 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4055 if (t
->to_verify_memory
!= NULL
)
4057 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
4060 fprintf_unfiltered (gdb_stdlog
,
4061 "target_verify_memory (%s, %s) = %d\n",
4062 paddress (target_gdbarch (), memaddr
),
4072 /* The documentation for this function is in its prototype declaration in
4076 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4078 struct target_ops
*t
;
4080 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4081 if (t
->to_insert_mask_watchpoint
!= NULL
)
4085 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
4088 fprintf_unfiltered (gdb_stdlog
, "\
4089 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4090 core_addr_to_string (addr
),
4091 core_addr_to_string (mask
), rw
, ret
);
4099 /* The documentation for this function is in its prototype declaration in
4103 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4105 struct target_ops
*t
;
4107 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4108 if (t
->to_remove_mask_watchpoint
!= NULL
)
4112 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4115 fprintf_unfiltered (gdb_stdlog
, "\
4116 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4117 core_addr_to_string (addr
),
4118 core_addr_to_string (mask
), rw
, ret
);
4126 /* The documentation for this function is in its prototype declaration
4130 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4132 struct target_ops
*t
;
4134 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4135 if (t
->to_masked_watch_num_registers
!= NULL
)
4136 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4141 /* The documentation for this function is in its prototype declaration
4145 target_ranged_break_num_registers (void)
4147 struct target_ops
*t
;
4149 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4150 if (t
->to_ranged_break_num_registers
!= NULL
)
4151 return t
->to_ranged_break_num_registers (t
);
4158 struct btrace_target_info
*
4159 target_enable_btrace (ptid_t ptid
)
4161 struct target_ops
*t
;
4163 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4164 if (t
->to_enable_btrace
!= NULL
)
4165 return t
->to_enable_btrace (t
, ptid
);
4174 target_disable_btrace (struct btrace_target_info
*btinfo
)
4176 struct target_ops
*t
;
4178 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4179 if (t
->to_disable_btrace
!= NULL
)
4181 t
->to_disable_btrace (t
, btinfo
);
4191 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4193 struct target_ops
*t
;
4195 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4196 if (t
->to_teardown_btrace
!= NULL
)
4198 t
->to_teardown_btrace (t
, btinfo
);
4208 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4209 struct btrace_target_info
*btinfo
,
4210 enum btrace_read_type type
)
4212 struct target_ops
*t
;
4214 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4215 if (t
->to_read_btrace
!= NULL
)
4216 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
4219 return BTRACE_ERR_NOT_SUPPORTED
;
4225 target_stop_recording (void)
4227 struct target_ops
*t
;
4229 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4230 if (t
->to_stop_recording
!= NULL
)
4232 t
->to_stop_recording (t
);
4236 /* This is optional. */
4242 target_info_record (void)
4244 struct target_ops
*t
;
4246 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4247 if (t
->to_info_record
!= NULL
)
4249 t
->to_info_record (t
);
4259 target_save_record (const char *filename
)
4261 struct target_ops
*t
;
4263 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4264 if (t
->to_save_record
!= NULL
)
4266 t
->to_save_record (t
, filename
);
4276 target_supports_delete_record (void)
4278 struct target_ops
*t
;
4280 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4281 if (t
->to_delete_record
!= NULL
)
4290 target_delete_record (void)
4292 struct target_ops
*t
;
4294 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4295 if (t
->to_delete_record
!= NULL
)
4297 t
->to_delete_record (t
);
4307 target_record_is_replaying (void)
4309 struct target_ops
*t
;
4311 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4312 if (t
->to_record_is_replaying
!= NULL
)
4313 return t
->to_record_is_replaying (t
);
4321 target_goto_record_begin (void)
4323 struct target_ops
*t
;
4325 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4326 if (t
->to_goto_record_begin
!= NULL
)
4328 t
->to_goto_record_begin (t
);
4338 target_goto_record_end (void)
4340 struct target_ops
*t
;
4342 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4343 if (t
->to_goto_record_end
!= NULL
)
4345 t
->to_goto_record_end (t
);
4355 target_goto_record (ULONGEST insn
)
4357 struct target_ops
*t
;
4359 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4360 if (t
->to_goto_record
!= NULL
)
4362 t
->to_goto_record (t
, insn
);
4372 target_insn_history (int size
, int flags
)
4374 struct target_ops
*t
;
4376 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4377 if (t
->to_insn_history
!= NULL
)
4379 t
->to_insn_history (t
, size
, flags
);
4389 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4391 struct target_ops
*t
;
4393 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4394 if (t
->to_insn_history_from
!= NULL
)
4396 t
->to_insn_history_from (t
, from
, size
, flags
);
4406 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4408 struct target_ops
*t
;
4410 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4411 if (t
->to_insn_history_range
!= NULL
)
4413 t
->to_insn_history_range (t
, begin
, end
, flags
);
4423 target_call_history (int size
, int flags
)
4425 struct target_ops
*t
;
4427 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4428 if (t
->to_call_history
!= NULL
)
4430 t
->to_call_history (t
, size
, flags
);
4440 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4442 struct target_ops
*t
;
4444 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4445 if (t
->to_call_history_from
!= NULL
)
4447 t
->to_call_history_from (t
, begin
, size
, flags
);
4457 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4459 struct target_ops
*t
;
4461 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4462 if (t
->to_call_history_range
!= NULL
)
4464 t
->to_call_history_range (t
, begin
, end
, flags
);
4472 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4474 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4476 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4481 const struct frame_unwind
*
4482 target_get_unwinder (void)
4484 struct target_ops
*t
;
4486 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4487 if (t
->to_get_unwinder
!= NULL
)
4488 return t
->to_get_unwinder
;
4495 const struct frame_unwind
*
4496 target_get_tailcall_unwinder (void)
4498 struct target_ops
*t
;
4500 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4501 if (t
->to_get_tailcall_unwinder
!= NULL
)
4502 return t
->to_get_tailcall_unwinder
;
4510 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4511 struct gdbarch
*gdbarch
)
4513 for (; ops
!= NULL
; ops
= ops
->beneath
)
4514 if (ops
->to_decr_pc_after_break
!= NULL
)
4515 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4517 return gdbarch_decr_pc_after_break (gdbarch
);
4523 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4525 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4529 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4530 int write
, struct mem_attrib
*attrib
,
4531 struct target_ops
*target
)
4535 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4538 fprintf_unfiltered (gdb_stdlog
,
4539 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4540 paddress (target_gdbarch (), memaddr
), len
,
4541 write
? "write" : "read", retval
);
4547 fputs_unfiltered (", bytes =", gdb_stdlog
);
4548 for (i
= 0; i
< retval
; i
++)
4550 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4552 if (targetdebug
< 2 && i
> 0)
4554 fprintf_unfiltered (gdb_stdlog
, " ...");
4557 fprintf_unfiltered (gdb_stdlog
, "\n");
4560 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4564 fputc_unfiltered ('\n', gdb_stdlog
);
4570 debug_to_files_info (struct target_ops
*target
)
4572 debug_target
.to_files_info (target
);
4574 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4578 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4579 struct bp_target_info
*bp_tgt
)
4583 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4585 fprintf_unfiltered (gdb_stdlog
,
4586 "target_insert_breakpoint (%s, xxx) = %ld\n",
4587 core_addr_to_string (bp_tgt
->placed_address
),
4588 (unsigned long) retval
);
4593 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4594 struct bp_target_info
*bp_tgt
)
4598 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4600 fprintf_unfiltered (gdb_stdlog
,
4601 "target_remove_breakpoint (%s, xxx) = %ld\n",
4602 core_addr_to_string (bp_tgt
->placed_address
),
4603 (unsigned long) retval
);
4608 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4609 int type
, int cnt
, int from_tty
)
4613 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4614 type
, cnt
, from_tty
);
4616 fprintf_unfiltered (gdb_stdlog
,
4617 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4618 (unsigned long) type
,
4619 (unsigned long) cnt
,
4620 (unsigned long) from_tty
,
4621 (unsigned long) retval
);
4626 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4627 CORE_ADDR addr
, int len
)
4631 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4634 fprintf_unfiltered (gdb_stdlog
,
4635 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4636 core_addr_to_string (addr
), (unsigned long) len
,
4637 core_addr_to_string (retval
));
4642 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4643 CORE_ADDR addr
, int len
, int rw
,
4644 struct expression
*cond
)
4648 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4652 fprintf_unfiltered (gdb_stdlog
,
4653 "target_can_accel_watchpoint_condition "
4654 "(%s, %d, %d, %s) = %ld\n",
4655 core_addr_to_string (addr
), len
, rw
,
4656 host_address_to_string (cond
), (unsigned long) retval
);
4661 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4665 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4667 fprintf_unfiltered (gdb_stdlog
,
4668 "target_stopped_by_watchpoint () = %ld\n",
4669 (unsigned long) retval
);
4674 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4678 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4680 fprintf_unfiltered (gdb_stdlog
,
4681 "target_stopped_data_address ([%s]) = %ld\n",
4682 core_addr_to_string (*addr
),
4683 (unsigned long)retval
);
4688 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4690 CORE_ADDR start
, int length
)
4694 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4697 fprintf_filtered (gdb_stdlog
,
4698 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4699 core_addr_to_string (addr
), core_addr_to_string (start
),
4705 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4706 struct gdbarch
*gdbarch
,
4707 struct bp_target_info
*bp_tgt
)
4711 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4714 fprintf_unfiltered (gdb_stdlog
,
4715 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4716 core_addr_to_string (bp_tgt
->placed_address
),
4717 (unsigned long) retval
);
4722 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4723 struct gdbarch
*gdbarch
,
4724 struct bp_target_info
*bp_tgt
)
4728 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4731 fprintf_unfiltered (gdb_stdlog
,
4732 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4733 core_addr_to_string (bp_tgt
->placed_address
),
4734 (unsigned long) retval
);
4739 debug_to_insert_watchpoint (struct target_ops
*self
,
4740 CORE_ADDR addr
, int len
, int type
,
4741 struct expression
*cond
)
4745 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4746 addr
, len
, type
, cond
);
4748 fprintf_unfiltered (gdb_stdlog
,
4749 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4750 core_addr_to_string (addr
), len
, type
,
4751 host_address_to_string (cond
), (unsigned long) retval
);
4756 debug_to_remove_watchpoint (struct target_ops
*self
,
4757 CORE_ADDR addr
, int len
, int type
,
4758 struct expression
*cond
)
4762 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4763 addr
, len
, type
, cond
);
4765 fprintf_unfiltered (gdb_stdlog
,
4766 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4767 core_addr_to_string (addr
), len
, type
,
4768 host_address_to_string (cond
), (unsigned long) retval
);
4773 debug_to_terminal_init (struct target_ops
*self
)
4775 debug_target
.to_terminal_init (&debug_target
);
4777 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4781 debug_to_terminal_inferior (struct target_ops
*self
)
4783 debug_target
.to_terminal_inferior (&debug_target
);
4785 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4789 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4791 debug_target
.to_terminal_ours_for_output (&debug_target
);
4793 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4797 debug_to_terminal_ours (struct target_ops
*self
)
4799 debug_target
.to_terminal_ours (&debug_target
);
4801 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4805 debug_to_terminal_save_ours (struct target_ops
*self
)
4807 debug_target
.to_terminal_save_ours (&debug_target
);
4809 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4813 debug_to_terminal_info (struct target_ops
*self
,
4814 const char *arg
, int from_tty
)
4816 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4818 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4823 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4825 debug_target
.to_load (&debug_target
, args
, from_tty
);
4827 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4831 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4833 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4835 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4836 ptid_get_pid (ptid
));
4840 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4844 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4846 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4853 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4857 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4859 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4866 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4870 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4872 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4879 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4883 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4885 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4892 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4896 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4898 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4905 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4909 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4911 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4918 debug_to_has_exited (struct target_ops
*self
,
4919 int pid
, int wait_status
, int *exit_status
)
4923 has_exited
= debug_target
.to_has_exited (&debug_target
,
4924 pid
, wait_status
, exit_status
);
4926 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4927 pid
, wait_status
, *exit_status
, has_exited
);
4933 debug_to_can_run (struct target_ops
*self
)
4937 retval
= debug_target
.to_can_run (&debug_target
);
4939 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4944 static struct gdbarch
*
4945 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4947 struct gdbarch
*retval
;
4949 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4951 fprintf_unfiltered (gdb_stdlog
,
4952 "target_thread_architecture (%s) = %s [%s]\n",
4953 target_pid_to_str (ptid
),
4954 host_address_to_string (retval
),
4955 gdbarch_bfd_arch_info (retval
)->printable_name
);
4960 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4962 debug_target
.to_stop (&debug_target
, ptid
);
4964 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4965 target_pid_to_str (ptid
));
4969 debug_to_rcmd (struct target_ops
*self
, char *command
,
4970 struct ui_file
*outbuf
)
4972 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4973 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4977 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4981 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4983 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4990 setup_target_debug (void)
4992 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4994 current_target
.to_open
= debug_to_open
;
4995 current_target
.to_post_attach
= debug_to_post_attach
;
4996 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4997 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4998 current_target
.to_files_info
= debug_to_files_info
;
4999 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
5000 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
5001 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
5002 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
5003 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
5004 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
5005 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
5006 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
5007 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
5008 current_target
.to_watchpoint_addr_within_range
5009 = debug_to_watchpoint_addr_within_range
;
5010 current_target
.to_region_ok_for_hw_watchpoint
5011 = debug_to_region_ok_for_hw_watchpoint
;
5012 current_target
.to_can_accel_watchpoint_condition
5013 = debug_to_can_accel_watchpoint_condition
;
5014 current_target
.to_terminal_init
= debug_to_terminal_init
;
5015 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
5016 current_target
.to_terminal_ours_for_output
5017 = debug_to_terminal_ours_for_output
;
5018 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
5019 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
5020 current_target
.to_terminal_info
= debug_to_terminal_info
;
5021 current_target
.to_load
= debug_to_load
;
5022 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
5023 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
5024 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
5025 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
5026 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
5027 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
5028 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
5029 current_target
.to_has_exited
= debug_to_has_exited
;
5030 current_target
.to_can_run
= debug_to_can_run
;
5031 current_target
.to_stop
= debug_to_stop
;
5032 current_target
.to_rcmd
= debug_to_rcmd
;
5033 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
5034 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
/* Default to_rcmd implementation: no monitor to talk to.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
5050 do_monitor_command (char *cmd
,
5053 target_rcmd (cmd
, gdb_stdtarg
);
5056 /* Print the name of each layers of our target stack. */
5059 maintenance_print_target_stack (char *cmd
, int from_tty
)
5061 struct target_ops
*t
;
5063 printf_filtered (_("The current target stack is:\n"));
5065 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
5067 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
5079 set_target_async_command (char *args
, int from_tty
,
5080 struct cmd_list_element
*c
)
5082 if (have_live_inferiors ())
5084 target_async_permitted_1
= target_async_permitted
;
5085 error (_("Cannot change this setting while the inferior is running."));
5088 target_async_permitted
= target_async_permitted_1
;
/* Shower for "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
/* Temporary copies of permission settings.  The "set" commands write
   into these shadows; the real may_* flags are only updated when the
   change is legal (see set_target_permissions).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5110 /* Make the user-set values match the real values again. */
5113 update_target_permissions (void)
5115 may_write_registers_1
= may_write_registers
;
5116 may_write_memory_1
= may_write_memory
;
5117 may_insert_breakpoints_1
= may_insert_breakpoints
;
5118 may_insert_tracepoints_1
= may_insert_tracepoints
;
5119 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
5120 may_stop_1
= may_stop
;
5123 /* The one function handles (most of) the permission flags in the same
5127 set_target_permissions (char *args
, int from_tty
,
5128 struct cmd_list_element
*c
)
5130 if (target_has_execution
)
5132 update_target_permissions ();
5133 error (_("Cannot change this setting while the inferior is running."));
5136 /* Make the real values match the user-changed values. */
5137 may_write_registers
= may_write_registers_1
;
5138 may_insert_breakpoints
= may_insert_breakpoints_1
;
5139 may_insert_tracepoints
= may_insert_tracepoints_1
;
5140 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
5141 may_stop
= may_stop_1
;
5142 update_observer_mode ();
5145 /* Set memory write permission independently of observer mode. */
5148 set_write_memory_permission (char *args
, int from_tty
,
5149 struct cmd_list_element
*c
)
5151 /* Make the real values match the user-changed values. */
5152 may_write_memory
= may_write_memory_1
;
5153 update_observer_mode ();
5158 initialize_targets (void)
5160 init_dummy_target ();
5161 push_target (&dummy_target
);
5163 add_info ("target", target_info
, targ_desc
);
5164 add_info ("files", target_info
, targ_desc
);
5166 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5167 Set target debugging."), _("\
5168 Show target debugging."), _("\
5169 When non-zero, target debugging is enabled. Higher numbers are more\n\
5170 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5174 &setdebuglist
, &showdebuglist
);
5176 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5177 &trust_readonly
, _("\
5178 Set mode for reading from readonly sections."), _("\
5179 Show mode for reading from readonly sections."), _("\
5180 When this mode is on, memory reads from readonly sections (such as .text)\n\
5181 will be read from the object file instead of from the target. This will\n\
5182 result in significant performance improvement for remote targets."),
5184 show_trust_readonly
,
5185 &setlist
, &showlist
);
5187 add_com ("monitor", class_obscure
, do_monitor_command
,
5188 _("Send a command to the remote monitor (remote targets only)."));
5190 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5191 _("Print the name of each layer of the internal target stack."),
5192 &maintenanceprintlist
);
5194 add_setshow_boolean_cmd ("target-async", no_class
,
5195 &target_async_permitted_1
, _("\
5196 Set whether gdb controls the inferior in asynchronous mode."), _("\
5197 Show whether gdb controls the inferior in asynchronous mode."), _("\
5198 Tells gdb whether to control the inferior in asynchronous mode."),
5199 set_target_async_command
,
5200 show_target_async_command
,
5204 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5205 &may_write_registers_1
, _("\
5206 Set permission to write into registers."), _("\
5207 Show permission to write into registers."), _("\
5208 When this permission is on, GDB may write into the target's registers.\n\
5209 Otherwise, any sort of write attempt will result in an error."),
5210 set_target_permissions
, NULL
,
5211 &setlist
, &showlist
);
5213 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5214 &may_write_memory_1
, _("\
5215 Set permission to write into target memory."), _("\
5216 Show permission to write into target memory."), _("\
5217 When this permission is on, GDB may write into the target's memory.\n\
5218 Otherwise, any sort of write attempt will result in an error."),
5219 set_write_memory_permission
, NULL
,
5220 &setlist
, &showlist
);
5222 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5223 &may_insert_breakpoints_1
, _("\
5224 Set permission to insert breakpoints in the target."), _("\
5225 Show permission to insert breakpoints in the target."), _("\
5226 When this permission is on, GDB may insert breakpoints in the program.\n\
5227 Otherwise, any sort of insertion attempt will result in an error."),
5228 set_target_permissions
, NULL
,
5229 &setlist
, &showlist
);
5231 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5232 &may_insert_tracepoints_1
, _("\
5233 Set permission to insert tracepoints in the target."), _("\
5234 Show permission to insert tracepoints in the target."), _("\
5235 When this permission is on, GDB may insert tracepoints in the program.\n\
5236 Otherwise, any sort of insertion attempt will result in an error."),
5237 set_target_permissions
, NULL
,
5238 &setlist
, &showlist
);
5240 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5241 &may_insert_fast_tracepoints_1
, _("\
5242 Set permission to insert fast tracepoints in the target."), _("\
5243 Show permission to insert fast tracepoints in the target."), _("\
5244 When this permission is on, GDB may insert fast tracepoints.\n\
5245 Otherwise, any sort of insertion attempt will result in an error."),
5246 set_target_permissions
, NULL
,
5247 &setlist
, &showlist
);
5249 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5251 Set permission to interrupt or signal the target."), _("\
5252 Show permission to interrupt or signal the target."), _("\
5253 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5254 Otherwise, any attempt to interrupt or stop will be ignored."),
5255 set_target_permissions
, NULL
,
5256 &setlist
, &showlist
);