1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR
, int);
57 static void tcomplain (void) ATTRIBUTE_NORETURN
;
59 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
61 static int return_zero (void);
63 static int return_one (void);
65 static int return_minus_one (void);
67 static void *return_null (void);
69 void target_ignore (void);
71 static void target_command (char *, int);
73 static struct target_ops
*find_default_run_target (char *);
75 static target_xfer_partial_ftype default_xfer_partial
;
77 static target_xfer_partial_ftype current_xfer_partial
;
79 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
82 static void init_dummy_target (void);
84 static struct target_ops debug_target
;
86 static void debug_to_open (char *, int);
88 static void debug_to_prepare_to_store (struct target_ops
*self
,
91 static void debug_to_files_info (struct target_ops
*);
93 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
94 struct bp_target_info
*);
96 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
97 struct bp_target_info
*);
99 static int debug_to_can_use_hw_breakpoint (int, int, int);
101 static int debug_to_insert_hw_breakpoint (struct gdbarch
*,
102 struct bp_target_info
*);
104 static int debug_to_remove_hw_breakpoint (struct gdbarch
*,
105 struct bp_target_info
*);
107 static int debug_to_insert_watchpoint (CORE_ADDR
, int, int,
108 struct expression
*);
110 static int debug_to_remove_watchpoint (CORE_ADDR
, int, int,
111 struct expression
*);
113 static int debug_to_stopped_by_watchpoint (void);
115 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
117 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
118 CORE_ADDR
, CORE_ADDR
, int);
120 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR
, int);
122 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR
, int, int,
123 struct expression
*);
125 static void debug_to_terminal_init (void);
127 static void debug_to_terminal_inferior (void);
129 static void debug_to_terminal_ours_for_output (void);
131 static void debug_to_terminal_save_ours (void);
133 static void debug_to_terminal_ours (void);
135 static void debug_to_load (char *, int);
137 static int debug_to_can_run (void);
139 static void debug_to_stop (ptid_t
);
141 /* Pointer to array of target architecture structures; the size of the
142 array; the current index into the array; the allocated size of the
144 struct target_ops
**target_structs
;
145 unsigned target_struct_size
;
146 unsigned target_struct_allocsize
;
147 #define DEFAULT_ALLOCSIZE 10
149 /* The initial current target, so that there is always a semi-valid
152 static struct target_ops dummy_target
;
154 /* Top of target stack. */
156 static struct target_ops
*target_stack
;
158 /* The target structure we are currently using to talk to a process
159 or file or whatever "inferior" we have. */
161 struct target_ops current_target
;
163 /* Command list for target. */
165 static struct cmd_list_element
*targetlist
= NULL
;
167 /* Nonzero if we should trust readonly sections from the
168 executable when reading memory. */
170 static int trust_readonly
= 0;
172 /* Nonzero if we should show true memory content including
173 memory breakpoint inserted by gdb. */
175 static int show_memory_breakpoints
= 0;
177 /* These globals control whether GDB attempts to perform these
178 operations; they are useful for targets that need to prevent
179 inadvertant disruption, such as in non-stop mode. */
181 int may_write_registers
= 1;
183 int may_write_memory
= 1;
185 int may_insert_breakpoints
= 1;
187 int may_insert_tracepoints
= 1;
189 int may_insert_fast_tracepoints
= 1;
193 /* Non-zero if we want to see trace of target level stuff. */
195 static unsigned int targetdebug
= 0;
/* Callback for "show debug target": report the current value of the
   "targetdebug" setting on FILE.  VALUE is the printable form supplied
   by the cli machinery.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
203 static void setup_target_debug (void);
205 /* The user just typed 'target' without the name of a target. */
208 target_command (char *arg
, int from_tty
)
210 fputs_filtered ("Argument required (target name). Try `help target'\n",
214 /* Default target_has_* methods for process_stratum targets. */
217 default_child_has_all_memory (struct target_ops
*ops
)
219 /* If no inferior selected, then we can't read memory here. */
220 if (ptid_equal (inferior_ptid
, null_ptid
))
227 default_child_has_memory (struct target_ops
*ops
)
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid
, null_ptid
))
237 default_child_has_stack (struct target_ops
*ops
)
239 /* If no inferior selected, there's no stack. */
240 if (ptid_equal (inferior_ptid
, null_ptid
))
247 default_child_has_registers (struct target_ops
*ops
)
249 /* Can't read registers from no inferior. */
250 if (ptid_equal (inferior_ptid
, null_ptid
))
257 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
259 /* If there's no thread selected, then we can't make it run through
261 if (ptid_equal (the_ptid
, null_ptid
))
269 target_has_all_memory_1 (void)
271 struct target_ops
*t
;
273 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
274 if (t
->to_has_all_memory (t
))
281 target_has_memory_1 (void)
283 struct target_ops
*t
;
285 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
286 if (t
->to_has_memory (t
))
293 target_has_stack_1 (void)
295 struct target_ops
*t
;
297 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
298 if (t
->to_has_stack (t
))
305 target_has_registers_1 (void)
307 struct target_ops
*t
;
309 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
310 if (t
->to_has_registers (t
))
317 target_has_execution_1 (ptid_t the_ptid
)
319 struct target_ops
*t
;
321 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
322 if (t
->to_has_execution (t
, the_ptid
))
329 target_has_execution_current (void)
331 return target_has_execution_1 (inferior_ptid
);
334 /* Complete initialization of T. This ensures that various fields in
335 T are set, if needed by the target implementation. */
338 complete_target_initialization (struct target_ops
*t
)
340 /* Provide default values for all "must have" methods. */
341 if (t
->to_xfer_partial
== NULL
)
342 t
->to_xfer_partial
= default_xfer_partial
;
344 if (t
->to_has_all_memory
== NULL
)
345 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
347 if (t
->to_has_memory
== NULL
)
348 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
350 if (t
->to_has_stack
== NULL
)
351 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
353 if (t
->to_has_registers
== NULL
)
354 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
356 if (t
->to_has_execution
== NULL
)
357 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
360 /* Add possible target architecture T to the list and add a new
361 command 'target T->to_shortname'. Set COMPLETER as the command's
362 completer if not NULL. */
365 add_target_with_completer (struct target_ops
*t
,
366 completer_ftype
*completer
)
368 struct cmd_list_element
*c
;
370 complete_target_initialization (t
);
374 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
375 target_structs
= (struct target_ops
**) xmalloc
376 (target_struct_allocsize
* sizeof (*target_structs
));
378 if (target_struct_size
>= target_struct_allocsize
)
380 target_struct_allocsize
*= 2;
381 target_structs
= (struct target_ops
**)
382 xrealloc ((char *) target_structs
,
383 target_struct_allocsize
* sizeof (*target_structs
));
385 target_structs
[target_struct_size
++] = t
;
387 if (targetlist
== NULL
)
388 add_prefix_cmd ("target", class_run
, target_command
, _("\
389 Connect to a target machine or process.\n\
390 The first argument is the type or protocol of the target machine.\n\
391 Remaining arguments are interpreted by the target protocol. For more\n\
392 information on the arguments for a particular protocol, type\n\
393 `help target ' followed by the protocol name."),
394 &targetlist
, "target ", 0, &cmdlist
);
395 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
397 if (completer
!= NULL
)
398 set_cmd_completer (c
, completer
);
401 /* Add a possible target architecture to the list. */
404 add_target (struct target_ops
*t
)
406 add_target_with_completer (t
, NULL
);
412 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
414 struct cmd_list_element
*c
;
417 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
419 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
420 alt
= xstrprintf ("target %s", t
->to_shortname
);
421 deprecate_cmd (c
, alt
);
434 struct target_ops
*t
;
436 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
437 if (t
->to_kill
!= NULL
)
440 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
450 target_load (char *arg
, int from_tty
)
452 target_dcache_invalidate ();
453 (*current_target
.to_load
) (arg
, from_tty
);
457 target_create_inferior (char *exec_file
, char *args
,
458 char **env
, int from_tty
)
460 struct target_ops
*t
;
462 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
464 if (t
->to_create_inferior
!= NULL
)
466 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
468 fprintf_unfiltered (gdb_stdlog
,
469 "target_create_inferior (%s, %s, xxx, %d)\n",
470 exec_file
, args
, from_tty
);
475 internal_error (__FILE__
, __LINE__
,
476 _("could not find a target to create inferior"));
480 target_terminal_inferior (void)
482 /* A background resume (``run&'') should leave GDB in control of the
483 terminal. Use target_can_async_p, not target_is_async_p, since at
484 this point the target is not async yet. However, if sync_execution
485 is not set, we know it will become async prior to resume. */
486 if (target_can_async_p () && !sync_execution
)
489 /* If GDB is resuming the inferior in the foreground, install
490 inferior's terminal modes. */
491 (*current_target
.to_terminal_inferior
) ();
495 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
496 struct target_ops
*t
)
498 errno
= EIO
; /* Can't read/write this location. */
499 return 0; /* No bytes handled. */
505 error (_("You can't do that when your target is `%s'"),
506 current_target
.to_shortname
);
/* Error out: the operation requires a live process and there is none.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
/* Default to_terminal_info method: there is nothing saved to report.  */

static void
default_terminal_info (const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
521 /* A default implementation for the to_get_ada_task_ptid target method.
523 This function builds the PTID by using both LWP and TID as part of
524 the PTID lwp and tid elements. The pid used is the pid of the
528 default_get_ada_task_ptid (long lwp
, long tid
)
530 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
533 static enum exec_direction_kind
534 default_execution_direction (void)
536 if (!target_can_execute_reverse
)
538 else if (!target_can_async_p ())
541 gdb_assert_not_reached ("\
542 to_execution_direction must be implemented for reverse async");
545 /* Go through the target stack from top to bottom, copying over zero
546 entries in current_target, then filling in still empty entries. In
547 effect, we are doing class inheritance through the pushed target
550 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
551 is currently implemented, is that it discards any knowledge of
552 which target an inherited method originally belonged to.
553 Consequently, new new target methods should instead explicitly and
554 locally search the target stack for the target that can handle the
558 update_current_target (void)
560 struct target_ops
*t
;
562 /* First, reset current's contents. */
563 memset (¤t_target
, 0, sizeof (current_target
));
565 #define INHERIT(FIELD, TARGET) \
566 if (!current_target.FIELD) \
567 current_target.FIELD = (TARGET)->FIELD
569 for (t
= target_stack
; t
; t
= t
->beneath
)
571 INHERIT (to_shortname
, t
);
572 INHERIT (to_longname
, t
);
574 /* Do not inherit to_open. */
575 /* Do not inherit to_close. */
576 /* Do not inherit to_attach. */
577 INHERIT (to_post_attach
, t
);
578 INHERIT (to_attach_no_wait
, t
);
579 /* Do not inherit to_detach. */
580 /* Do not inherit to_disconnect. */
581 /* Do not inherit to_resume. */
582 /* Do not inherit to_wait. */
583 /* Do not inherit to_fetch_registers. */
584 /* Do not inherit to_store_registers. */
585 INHERIT (to_prepare_to_store
, t
);
586 INHERIT (deprecated_xfer_memory
, t
);
587 INHERIT (to_files_info
, t
);
588 /* Do not inherit to_insert_breakpoint. */
589 /* Do not inherit to_remove_breakpoint. */
590 INHERIT (to_can_use_hw_breakpoint
, t
);
591 INHERIT (to_insert_hw_breakpoint
, t
);
592 INHERIT (to_remove_hw_breakpoint
, t
);
593 /* Do not inherit to_ranged_break_num_registers. */
594 INHERIT (to_insert_watchpoint
, t
);
595 INHERIT (to_remove_watchpoint
, t
);
596 /* Do not inherit to_insert_mask_watchpoint. */
597 /* Do not inherit to_remove_mask_watchpoint. */
598 INHERIT (to_stopped_data_address
, t
);
599 INHERIT (to_have_steppable_watchpoint
, t
);
600 INHERIT (to_have_continuable_watchpoint
, t
);
601 INHERIT (to_stopped_by_watchpoint
, t
);
602 INHERIT (to_watchpoint_addr_within_range
, t
);
603 INHERIT (to_region_ok_for_hw_watchpoint
, t
);
604 INHERIT (to_can_accel_watchpoint_condition
, t
);
605 /* Do not inherit to_masked_watch_num_registers. */
606 INHERIT (to_terminal_init
, t
);
607 INHERIT (to_terminal_inferior
, t
);
608 INHERIT (to_terminal_ours_for_output
, t
);
609 INHERIT (to_terminal_ours
, t
);
610 INHERIT (to_terminal_save_ours
, t
);
611 INHERIT (to_terminal_info
, t
);
612 /* Do not inherit to_kill. */
613 INHERIT (to_load
, t
);
614 /* Do no inherit to_create_inferior. */
615 INHERIT (to_post_startup_inferior
, t
);
616 INHERIT (to_insert_fork_catchpoint
, t
);
617 INHERIT (to_remove_fork_catchpoint
, t
);
618 INHERIT (to_insert_vfork_catchpoint
, t
);
619 INHERIT (to_remove_vfork_catchpoint
, t
);
620 /* Do not inherit to_follow_fork. */
621 INHERIT (to_insert_exec_catchpoint
, t
);
622 INHERIT (to_remove_exec_catchpoint
, t
);
623 INHERIT (to_set_syscall_catchpoint
, t
);
624 INHERIT (to_has_exited
, t
);
625 /* Do not inherit to_mourn_inferior. */
626 INHERIT (to_can_run
, t
);
627 /* Do not inherit to_pass_signals. */
628 /* Do not inherit to_program_signals. */
629 /* Do not inherit to_thread_alive. */
630 /* Do not inherit to_find_new_threads. */
631 /* Do not inherit to_pid_to_str. */
632 INHERIT (to_extra_thread_info
, t
);
633 INHERIT (to_thread_name
, t
);
634 INHERIT (to_stop
, t
);
635 /* Do not inherit to_xfer_partial. */
636 INHERIT (to_rcmd
, t
);
637 INHERIT (to_pid_to_exec_file
, t
);
638 INHERIT (to_log_command
, t
);
639 INHERIT (to_stratum
, t
);
640 /* Do not inherit to_has_all_memory. */
641 /* Do not inherit to_has_memory. */
642 /* Do not inherit to_has_stack. */
643 /* Do not inherit to_has_registers. */
644 /* Do not inherit to_has_execution. */
645 INHERIT (to_has_thread_control
, t
);
646 INHERIT (to_can_async_p
, t
);
647 INHERIT (to_is_async_p
, t
);
648 INHERIT (to_async
, t
);
649 INHERIT (to_find_memory_regions
, t
);
650 INHERIT (to_make_corefile_notes
, t
);
651 INHERIT (to_get_bookmark
, t
);
652 INHERIT (to_goto_bookmark
, t
);
653 /* Do not inherit to_get_thread_local_address. */
654 INHERIT (to_can_execute_reverse
, t
);
655 INHERIT (to_execution_direction
, t
);
656 INHERIT (to_thread_architecture
, t
);
657 /* Do not inherit to_read_description. */
658 INHERIT (to_get_ada_task_ptid
, t
);
659 /* Do not inherit to_search_memory. */
660 INHERIT (to_supports_multi_process
, t
);
661 INHERIT (to_supports_enable_disable_tracepoint
, t
);
662 INHERIT (to_supports_string_tracing
, t
);
663 INHERIT (to_trace_init
, t
);
664 INHERIT (to_download_tracepoint
, t
);
665 INHERIT (to_can_download_tracepoint
, t
);
666 INHERIT (to_download_trace_state_variable
, t
);
667 INHERIT (to_enable_tracepoint
, t
);
668 INHERIT (to_disable_tracepoint
, t
);
669 INHERIT (to_trace_set_readonly_regions
, t
);
670 INHERIT (to_trace_start
, t
);
671 INHERIT (to_get_trace_status
, t
);
672 INHERIT (to_get_tracepoint_status
, t
);
673 INHERIT (to_trace_stop
, t
);
674 INHERIT (to_trace_find
, t
);
675 INHERIT (to_get_trace_state_variable_value
, t
);
676 INHERIT (to_save_trace_data
, t
);
677 INHERIT (to_upload_tracepoints
, t
);
678 INHERIT (to_upload_trace_state_variables
, t
);
679 INHERIT (to_get_raw_trace_data
, t
);
680 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
681 INHERIT (to_set_disconnected_tracing
, t
);
682 INHERIT (to_set_circular_trace_buffer
, t
);
683 INHERIT (to_set_trace_buffer_size
, t
);
684 INHERIT (to_set_trace_notes
, t
);
685 INHERIT (to_get_tib_address
, t
);
686 INHERIT (to_set_permissions
, t
);
687 INHERIT (to_static_tracepoint_marker_at
, t
);
688 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
689 INHERIT (to_traceframe_info
, t
);
690 INHERIT (to_use_agent
, t
);
691 INHERIT (to_can_use_agent
, t
);
692 INHERIT (to_augmented_libraries_svr4_read
, t
);
693 INHERIT (to_magic
, t
);
694 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
695 INHERIT (to_can_run_breakpoint_commands
, t
);
696 /* Do not inherit to_memory_map. */
697 /* Do not inherit to_flash_erase. */
698 /* Do not inherit to_flash_done. */
702 /* Clean up a target struct so it no longer has any zero pointers in
703 it. Some entries are defaulted to a method that print an error,
704 others are hard-wired to a standard recursive default. */
706 #define de_fault(field, value) \
707 if (!current_target.field) \
708 current_target.field = value
711 (void (*) (char *, int))
716 de_fault (to_post_attach
,
719 de_fault (to_prepare_to_store
,
720 (void (*) (struct target_ops
*, struct regcache
*))
722 de_fault (deprecated_xfer_memory
,
723 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
724 struct mem_attrib
*, struct target_ops
*))
726 de_fault (to_files_info
,
727 (void (*) (struct target_ops
*))
729 de_fault (to_can_use_hw_breakpoint
,
730 (int (*) (int, int, int))
732 de_fault (to_insert_hw_breakpoint
,
733 (int (*) (struct gdbarch
*, struct bp_target_info
*))
735 de_fault (to_remove_hw_breakpoint
,
736 (int (*) (struct gdbarch
*, struct bp_target_info
*))
738 de_fault (to_insert_watchpoint
,
739 (int (*) (CORE_ADDR
, int, int, struct expression
*))
741 de_fault (to_remove_watchpoint
,
742 (int (*) (CORE_ADDR
, int, int, struct expression
*))
744 de_fault (to_stopped_by_watchpoint
,
747 de_fault (to_stopped_data_address
,
748 (int (*) (struct target_ops
*, CORE_ADDR
*))
750 de_fault (to_watchpoint_addr_within_range
,
751 default_watchpoint_addr_within_range
);
752 de_fault (to_region_ok_for_hw_watchpoint
,
753 default_region_ok_for_hw_watchpoint
);
754 de_fault (to_can_accel_watchpoint_condition
,
755 (int (*) (CORE_ADDR
, int, int, struct expression
*))
757 de_fault (to_terminal_init
,
760 de_fault (to_terminal_inferior
,
763 de_fault (to_terminal_ours_for_output
,
766 de_fault (to_terminal_ours
,
769 de_fault (to_terminal_save_ours
,
772 de_fault (to_terminal_info
,
773 default_terminal_info
);
775 (void (*) (char *, int))
777 de_fault (to_post_startup_inferior
,
780 de_fault (to_insert_fork_catchpoint
,
783 de_fault (to_remove_fork_catchpoint
,
786 de_fault (to_insert_vfork_catchpoint
,
789 de_fault (to_remove_vfork_catchpoint
,
792 de_fault (to_insert_exec_catchpoint
,
795 de_fault (to_remove_exec_catchpoint
,
798 de_fault (to_set_syscall_catchpoint
,
799 (int (*) (int, int, int, int, int *))
801 de_fault (to_has_exited
,
802 (int (*) (int, int, int *))
804 de_fault (to_can_run
,
806 de_fault (to_extra_thread_info
,
807 (char *(*) (struct thread_info
*))
809 de_fault (to_thread_name
,
810 (char *(*) (struct thread_info
*))
815 current_target
.to_xfer_partial
= current_xfer_partial
;
817 (void (*) (char *, struct ui_file
*))
819 de_fault (to_pid_to_exec_file
,
823 (void (*) (void (*) (enum inferior_event_type
, void*), void*))
825 de_fault (to_thread_architecture
,
826 default_thread_architecture
);
827 current_target
.to_read_description
= NULL
;
828 de_fault (to_get_ada_task_ptid
,
829 (ptid_t (*) (long, long))
830 default_get_ada_task_ptid
);
831 de_fault (to_supports_multi_process
,
834 de_fault (to_supports_enable_disable_tracepoint
,
837 de_fault (to_supports_string_tracing
,
840 de_fault (to_trace_init
,
843 de_fault (to_download_tracepoint
,
844 (void (*) (struct bp_location
*))
846 de_fault (to_can_download_tracepoint
,
849 de_fault (to_download_trace_state_variable
,
850 (void (*) (struct trace_state_variable
*))
852 de_fault (to_enable_tracepoint
,
853 (void (*) (struct bp_location
*))
855 de_fault (to_disable_tracepoint
,
856 (void (*) (struct bp_location
*))
858 de_fault (to_trace_set_readonly_regions
,
861 de_fault (to_trace_start
,
864 de_fault (to_get_trace_status
,
865 (int (*) (struct trace_status
*))
867 de_fault (to_get_tracepoint_status
,
868 (void (*) (struct breakpoint
*, struct uploaded_tp
*))
870 de_fault (to_trace_stop
,
873 de_fault (to_trace_find
,
874 (int (*) (enum trace_find_type
, int, CORE_ADDR
, CORE_ADDR
, int *))
876 de_fault (to_get_trace_state_variable_value
,
877 (int (*) (int, LONGEST
*))
879 de_fault (to_save_trace_data
,
880 (int (*) (const char *))
882 de_fault (to_upload_tracepoints
,
883 (int (*) (struct uploaded_tp
**))
885 de_fault (to_upload_trace_state_variables
,
886 (int (*) (struct uploaded_tsv
**))
888 de_fault (to_get_raw_trace_data
,
889 (LONGEST (*) (gdb_byte
*, ULONGEST
, LONGEST
))
891 de_fault (to_get_min_fast_tracepoint_insn_len
,
894 de_fault (to_set_disconnected_tracing
,
897 de_fault (to_set_circular_trace_buffer
,
900 de_fault (to_set_trace_buffer_size
,
903 de_fault (to_set_trace_notes
,
904 (int (*) (const char *, const char *, const char *))
906 de_fault (to_get_tib_address
,
907 (int (*) (ptid_t
, CORE_ADDR
*))
909 de_fault (to_set_permissions
,
912 de_fault (to_static_tracepoint_marker_at
,
913 (int (*) (CORE_ADDR
, struct static_tracepoint_marker
*))
915 de_fault (to_static_tracepoint_markers_by_strid
,
916 (VEC(static_tracepoint_marker_p
) * (*) (const char *))
918 de_fault (to_traceframe_info
,
919 (struct traceframe_info
* (*) (void))
921 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
924 de_fault (to_can_run_breakpoint_commands
,
927 de_fault (to_use_agent
,
930 de_fault (to_can_use_agent
,
933 de_fault (to_augmented_libraries_svr4_read
,
936 de_fault (to_execution_direction
, default_execution_direction
);
940 /* Finally, position the target-stack beneath the squashed
941 "current_target". That way code looking for a non-inherited
942 target method can quickly and simply find it. */
943 current_target
.beneath
= target_stack
;
946 setup_target_debug ();
949 /* Push a new target type into the stack of the existing target accessors,
950 possibly superseding some of the existing accessors.
952 Rather than allow an empty stack, we always have the dummy target at
953 the bottom stratum, so we can call the function vectors without
957 push_target (struct target_ops
*t
)
959 struct target_ops
**cur
;
961 /* Check magic number. If wrong, it probably means someone changed
962 the struct definition, but not all the places that initialize one. */
963 if (t
->to_magic
!= OPS_MAGIC
)
965 fprintf_unfiltered (gdb_stderr
,
966 "Magic number of %s target struct wrong\n",
968 internal_error (__FILE__
, __LINE__
,
969 _("failed internal consistency check"));
972 /* Find the proper stratum to install this target in. */
973 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
975 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
979 /* If there's already targets at this stratum, remove them. */
980 /* FIXME: cagney/2003-10-15: I think this should be popping all
981 targets to CUR, and not just those at this stratum level. */
982 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
984 /* There's already something at this stratum level. Close it,
985 and un-hook it from the stack. */
986 struct target_ops
*tmp
= (*cur
);
988 (*cur
) = (*cur
)->beneath
;
993 /* We have removed all targets in our stratum, now add the new one. */
997 update_current_target ();
1000 /* Remove a target_ops vector from the stack, wherever it may be.
1001 Return how many times it was removed (0 or 1). */
1004 unpush_target (struct target_ops
*t
)
1006 struct target_ops
**cur
;
1007 struct target_ops
*tmp
;
1009 if (t
->to_stratum
== dummy_stratum
)
1010 internal_error (__FILE__
, __LINE__
,
1011 _("Attempt to unpush the dummy target"));
1013 /* Look for the specified target. Note that we assume that a target
1014 can only occur once in the target stack. */
1016 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1022 /* If we don't find target_ops, quit. Only open targets should be
1027 /* Unchain the target. */
1029 (*cur
) = (*cur
)->beneath
;
1030 tmp
->beneath
= NULL
;
1032 update_current_target ();
1034 /* Finally close the target. Note we do this after unchaining, so
1035 any target method calls from within the target_close
1036 implementation don't end up in T anymore. */
1043 pop_all_targets_above (enum strata above_stratum
)
1045 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
1047 if (!unpush_target (target_stack
))
1049 fprintf_unfiltered (gdb_stderr
,
1050 "pop_all_targets couldn't find target %s\n",
1051 target_stack
->to_shortname
);
1052 internal_error (__FILE__
, __LINE__
,
1053 _("failed internal consistency check"));
1060 pop_all_targets (void)
1062 pop_all_targets_above (dummy_stratum
);
1065 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1068 target_is_pushed (struct target_ops
*t
)
1070 struct target_ops
**cur
;
1072 /* Check magic number. If wrong, it probably means someone changed
1073 the struct definition, but not all the places that initialize one. */
1074 if (t
->to_magic
!= OPS_MAGIC
)
1076 fprintf_unfiltered (gdb_stderr
,
1077 "Magic number of %s target struct wrong\n",
1079 internal_error (__FILE__
, __LINE__
,
1080 _("failed internal consistency check"));
1083 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1090 /* Using the objfile specified in OBJFILE, find the address for the
1091 current thread's thread-local storage with offset OFFSET. */
1093 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1095 volatile CORE_ADDR addr
= 0;
1096 struct target_ops
*target
;
1098 for (target
= current_target
.beneath
;
1100 target
= target
->beneath
)
1102 if (target
->to_get_thread_local_address
!= NULL
)
1107 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1109 ptid_t ptid
= inferior_ptid
;
1110 volatile struct gdb_exception ex
;
1112 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1116 /* Fetch the load module address for this objfile. */
1117 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1119 /* If it's 0, throw the appropriate exception. */
1121 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1122 _("TLS load module not found"));
1124 addr
= target
->to_get_thread_local_address (target
, ptid
,
1127 /* If an error occurred, print TLS related messages here. Otherwise,
1128 throw the error to some higher catcher. */
1131 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1135 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1136 error (_("Cannot find thread-local variables "
1137 "in this thread library."));
1139 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1140 if (objfile_is_library
)
1141 error (_("Cannot find shared library `%s' in dynamic"
1142 " linker's load module list"), objfile_name (objfile
));
1144 error (_("Cannot find executable file `%s' in dynamic"
1145 " linker's load module list"), objfile_name (objfile
));
1147 case TLS_NOT_ALLOCATED_YET_ERROR
:
1148 if (objfile_is_library
)
1149 error (_("The inferior has not yet allocated storage for"
1150 " thread-local variables in\n"
1151 "the shared library `%s'\n"
1153 objfile_name (objfile
), target_pid_to_str (ptid
));
1155 error (_("The inferior has not yet allocated storage for"
1156 " thread-local variables in\n"
1157 "the executable `%s'\n"
1159 objfile_name (objfile
), target_pid_to_str (ptid
));
1161 case TLS_GENERIC_ERROR
:
1162 if (objfile_is_library
)
1163 error (_("Cannot find thread-local storage for %s, "
1164 "shared library %s:\n%s"),
1165 target_pid_to_str (ptid
),
1166 objfile_name (objfile
), ex
.message
);
1168 error (_("Cannot find thread-local storage for %s, "
1169 "executable file %s:\n%s"),
1170 target_pid_to_str (ptid
),
1171 objfile_name (objfile
), ex
.message
);
1174 throw_exception (ex
);
1179 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1180 TLS is an ABI-specific thing. But we don't do that yet. */
1182 error (_("Cannot find thread-local variables on this target"));
1188 target_xfer_status_to_string (enum target_xfer_status err
)
1190 #define CASE(X) case X: return #X
1193 CASE(TARGET_XFER_E_IO
);
1194 CASE(TARGET_XFER_E_UNAVAILABLE
);
1203 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1205 /* target_read_string -- read a null terminated string, up to LEN bytes,
1206 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1207 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1208 is responsible for freeing it. Return the number of bytes successfully
1212 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1214 int tlen
, offset
, i
;
1218 int buffer_allocated
;
1220 unsigned int nbytes_read
= 0;
1222 gdb_assert (string
);
1224 /* Small for testing. */
1225 buffer_allocated
= 4;
1226 buffer
= xmalloc (buffer_allocated
);
1231 tlen
= MIN (len
, 4 - (memaddr
& 3));
1232 offset
= memaddr
& 3;
1234 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1237 /* The transfer request might have crossed the boundary to an
1238 unallocated region of memory. Retry the transfer, requesting
1242 errcode
= target_read_memory (memaddr
, buf
, 1);
1247 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1251 bytes
= bufptr
- buffer
;
1252 buffer_allocated
*= 2;
1253 buffer
= xrealloc (buffer
, buffer_allocated
);
1254 bufptr
= buffer
+ bytes
;
1257 for (i
= 0; i
< tlen
; i
++)
1259 *bufptr
++ = buf
[i
+ offset
];
1260 if (buf
[i
+ offset
] == '\000')
1262 nbytes_read
+= i
+ 1;
1269 nbytes_read
+= tlen
;
1278 struct target_section_table
*
1279 target_get_section_table (struct target_ops
*target
)
1281 struct target_ops
*t
;
1284 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1286 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1287 if (t
->to_get_section_table
!= NULL
)
1288 return (*t
->to_get_section_table
) (t
);
1293 /* Find a section containing ADDR. */
1295 struct target_section
*
1296 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1298 struct target_section_table
*table
= target_get_section_table (target
);
1299 struct target_section
*secp
;
1304 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1306 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1312 /* Read memory from the live target, even if currently inspecting a
1313 traceframe. The return is the same as that of target_read. */
1315 static enum target_xfer_status
1316 target_read_live_memory (enum target_object object
,
1317 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1318 ULONGEST
*xfered_len
)
1320 enum target_xfer_status ret
;
1321 struct cleanup
*cleanup
;
1323 /* Switch momentarily out of tfind mode so to access live memory.
1324 Note that this must not clear global state, such as the frame
1325 cache, which must still remain valid for the previous traceframe.
1326 We may be _building_ the frame cache at this point. */
1327 cleanup
= make_cleanup_restore_traceframe_number ();
1328 set_traceframe_number (-1);
1330 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1331 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1333 do_cleanups (cleanup
);
1337 /* Using the set of read-only target sections of OPS, read live
1338 read-only memory. Note that the actual reads start from the
1339 top-most target again.
1341 For interface/parameters/return description see target.h,
1344 static enum target_xfer_status
1345 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1346 enum target_object object
,
1347 gdb_byte
*readbuf
, ULONGEST memaddr
,
1348 ULONGEST len
, ULONGEST
*xfered_len
)
1350 struct target_section
*secp
;
1351 struct target_section_table
*table
;
1353 secp
= target_section_by_addr (ops
, memaddr
);
1355 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1356 secp
->the_bfd_section
)
1359 struct target_section
*p
;
1360 ULONGEST memend
= memaddr
+ len
;
1362 table
= target_get_section_table (ops
);
1364 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1366 if (memaddr
>= p
->addr
)
1368 if (memend
<= p
->endaddr
)
1370 /* Entire transfer is within this section. */
1371 return target_read_live_memory (object
, memaddr
,
1372 readbuf
, len
, xfered_len
);
1374 else if (memaddr
>= p
->endaddr
)
1376 /* This section ends before the transfer starts. */
1381 /* This section overlaps the transfer. Just do half. */
1382 len
= p
->endaddr
- memaddr
;
1383 return target_read_live_memory (object
, memaddr
,
1384 readbuf
, len
, xfered_len
);
1390 return TARGET_XFER_EOF
;
1393 /* Read memory from more than one valid target. A core file, for
1394 instance, could have some of memory but delegate other bits to
1395 the target below it. So, we must manually try all targets. */
1397 static enum target_xfer_status
1398 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1399 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1400 ULONGEST
*xfered_len
)
1402 enum target_xfer_status res
;
1406 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1407 readbuf
, writebuf
, memaddr
, len
,
1409 if (res
== TARGET_XFER_OK
)
1412 /* Stop if the target reports that the memory is not available. */
1413 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1416 /* We want to continue past core files to executables, but not
1417 past a running target's memory. */
1418 if (ops
->to_has_all_memory (ops
))
1423 while (ops
!= NULL
);
1428 /* Perform a partial memory transfer.
1429 For docs see target.h, to_xfer_partial. */
1431 static enum target_xfer_status
1432 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1433 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1434 ULONGEST len
, ULONGEST
*xfered_len
)
1436 enum target_xfer_status res
;
1438 struct mem_region
*region
;
1439 struct inferior
*inf
;
1441 /* For accesses to unmapped overlay sections, read directly from
1442 files. Must do this first, as MEMADDR may need adjustment. */
1443 if (readbuf
!= NULL
&& overlay_debugging
)
1445 struct obj_section
*section
= find_pc_overlay (memaddr
);
1447 if (pc_in_unmapped_range (memaddr
, section
))
1449 struct target_section_table
*table
1450 = target_get_section_table (ops
);
1451 const char *section_name
= section
->the_bfd_section
->name
;
1453 memaddr
= overlay_mapped_address (memaddr
, section
);
1454 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1455 memaddr
, len
, xfered_len
,
1457 table
->sections_end
,
1462 /* Try the executable files, if "trust-readonly-sections" is set. */
1463 if (readbuf
!= NULL
&& trust_readonly
)
1465 struct target_section
*secp
;
1466 struct target_section_table
*table
;
1468 secp
= target_section_by_addr (ops
, memaddr
);
1470 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1471 secp
->the_bfd_section
)
1474 table
= target_get_section_table (ops
);
1475 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1476 memaddr
, len
, xfered_len
,
1478 table
->sections_end
,
1483 /* If reading unavailable memory in the context of traceframes, and
1484 this address falls within a read-only section, fallback to
1485 reading from live memory. */
1486 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1488 VEC(mem_range_s
) *available
;
1490 /* If we fail to get the set of available memory, then the
1491 target does not support querying traceframe info, and so we
1492 attempt reading from the traceframe anyway (assuming the
1493 target implements the old QTro packet then). */
1494 if (traceframe_available_memory (&available
, memaddr
, len
))
1496 struct cleanup
*old_chain
;
1498 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1500 if (VEC_empty (mem_range_s
, available
)
1501 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1503 /* Don't read into the traceframe's available
1505 if (!VEC_empty (mem_range_s
, available
))
1507 LONGEST oldlen
= len
;
1509 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1510 gdb_assert (len
<= oldlen
);
1513 do_cleanups (old_chain
);
1515 /* This goes through the topmost target again. */
1516 res
= memory_xfer_live_readonly_partial (ops
, object
,
1519 if (res
== TARGET_XFER_OK
)
1520 return TARGET_XFER_OK
;
1523 /* No use trying further, we know some memory starting
1524 at MEMADDR isn't available. */
1526 return TARGET_XFER_E_UNAVAILABLE
;
1530 /* Don't try to read more than how much is available, in
1531 case the target implements the deprecated QTro packet to
1532 cater for older GDBs (the target's knowledge of read-only
1533 sections may be outdated by now). */
1534 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1536 do_cleanups (old_chain
);
1540 /* Try GDB's internal data cache. */
1541 region
= lookup_mem_region (memaddr
);
1542 /* region->hi == 0 means there's no upper bound. */
1543 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1546 reg_len
= region
->hi
- memaddr
;
1548 switch (region
->attrib
.mode
)
1551 if (writebuf
!= NULL
)
1552 return TARGET_XFER_E_IO
;
1556 if (readbuf
!= NULL
)
1557 return TARGET_XFER_E_IO
;
1561 /* We only support writing to flash during "load" for now. */
1562 if (writebuf
!= NULL
)
1563 error (_("Writing to flash memory forbidden in this context"));
1567 return TARGET_XFER_E_IO
;
1570 if (!ptid_equal (inferior_ptid
, null_ptid
))
1571 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1576 /* The dcache reads whole cache lines; that doesn't play well
1577 with reading from a trace buffer, because reading outside of
1578 the collected memory range fails. */
1579 && get_traceframe_number () == -1
1580 && (region
->attrib
.cache
1581 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1582 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1584 DCACHE
*dcache
= target_dcache_get_or_init ();
1587 if (readbuf
!= NULL
)
1588 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1590 /* FIXME drow/2006-08-09: If we're going to preserve const
1591 correctness dcache_xfer_memory should take readbuf and
1593 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1596 return TARGET_XFER_E_IO
;
1599 *xfered_len
= (ULONGEST
) l
;
1600 return TARGET_XFER_OK
;
1604 /* If none of those methods found the memory we wanted, fall back
1605 to a target partial transfer. Normally a single call to
1606 to_xfer_partial is enough; if it doesn't recognize an object
1607 it will call the to_xfer_partial of the next target down.
1608 But for memory this won't do. Memory is the only target
1609 object which can be read from more than one valid target.
1610 A core file, for instance, could have some of memory but
1611 delegate other bits to the target below it. So, we must
1612 manually try all targets. */
1614 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1617 /* Make sure the cache gets updated no matter what - if we are writing
1618 to the stack. Even if this write is not tagged as such, we still need
1619 to update the cache. */
1621 if (res
== TARGET_XFER_OK
1624 && target_dcache_init_p ()
1625 && !region
->attrib
.cache
1626 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1627 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1629 DCACHE
*dcache
= target_dcache_get ();
1631 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1634 /* If we still haven't got anything, return the last error. We
1639 /* Perform a partial memory transfer. For docs see target.h,
1642 static enum target_xfer_status
1643 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1644 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1645 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1647 enum target_xfer_status res
;
1649 /* Zero length requests are ok and require no work. */
1651 return TARGET_XFER_EOF
;
1653 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1654 breakpoint insns, thus hiding out from higher layers whether
1655 there are software breakpoints inserted in the code stream. */
1656 if (readbuf
!= NULL
)
1658 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1661 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1662 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1667 struct cleanup
*old_chain
;
1669 /* A large write request is likely to be partially satisfied
1670 by memory_xfer_partial_1. We will continually malloc
1671 and free a copy of the entire write request for breakpoint
1672 shadow handling even though we only end up writing a small
1673 subset of it. Cap writes to 4KB to mitigate this. */
1674 len
= min (4096, len
);
1676 buf
= xmalloc (len
);
1677 old_chain
= make_cleanup (xfree
, buf
);
1678 memcpy (buf
, writebuf
, len
);
1680 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1681 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1684 do_cleanups (old_chain
);
1691 restore_show_memory_breakpoints (void *arg
)
1693 show_memory_breakpoints
= (uintptr_t) arg
;
1697 make_show_memory_breakpoints_cleanup (int show
)
1699 int current
= show_memory_breakpoints
;
1701 show_memory_breakpoints
= show
;
1702 return make_cleanup (restore_show_memory_breakpoints
,
1703 (void *) (uintptr_t) current
);
1706 /* For docs see target.h, to_xfer_partial. */
1708 enum target_xfer_status
1709 target_xfer_partial (struct target_ops
*ops
,
1710 enum target_object object
, const char *annex
,
1711 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1712 ULONGEST offset
, ULONGEST len
,
1713 ULONGEST
*xfered_len
)
1715 enum target_xfer_status retval
;
1717 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1719 /* Transfer is done when LEN is zero. */
1721 return TARGET_XFER_EOF
;
1723 if (writebuf
&& !may_write_memory
)
1724 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1725 core_addr_to_string_nz (offset
), plongest (len
));
1729 /* If this is a memory transfer, let the memory-specific code
1730 have a look at it instead. Memory transfers are more
1732 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1733 || object
== TARGET_OBJECT_CODE_MEMORY
)
1734 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1735 writebuf
, offset
, len
, xfered_len
);
1736 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1738 /* Request the normal memory object from other layers. */
1739 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1743 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1744 writebuf
, offset
, len
, xfered_len
);
1748 const unsigned char *myaddr
= NULL
;
1750 fprintf_unfiltered (gdb_stdlog
,
1751 "%s:target_xfer_partial "
1752 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1755 (annex
? annex
: "(null)"),
1756 host_address_to_string (readbuf
),
1757 host_address_to_string (writebuf
),
1758 core_addr_to_string_nz (offset
),
1759 pulongest (len
), retval
,
1760 pulongest (*xfered_len
));
1766 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1770 fputs_unfiltered (", bytes =", gdb_stdlog
);
1771 for (i
= 0; i
< *xfered_len
; i
++)
1773 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1775 if (targetdebug
< 2 && i
> 0)
1777 fprintf_unfiltered (gdb_stdlog
, " ...");
1780 fprintf_unfiltered (gdb_stdlog
, "\n");
1783 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1787 fputc_unfiltered ('\n', gdb_stdlog
);
1790 /* Check implementations of to_xfer_partial update *XFERED_LEN
1791 properly. Do assertion after printing debug messages, so that we
1792 can find more clues on assertion failure from debugging messages. */
1793 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1794 gdb_assert (*xfered_len
> 0);
1799 /* Read LEN bytes of target memory at address MEMADDR, placing the
1800 results in GDB's memory at MYADDR. Returns either 0 for success or
1801 TARGET_XFER_E_IO if any error occurs.
1803 If an error occurs, no guarantee is made about the contents of the data at
1804 MYADDR. In particular, the caller should not depend upon partial reads
1805 filling the buffer with good data. There is no way for the caller to know
1806 how much good data might have been transfered anyway. Callers that can
1807 deal with partial reads should call target_read (which will retry until
1808 it makes no progress, and then return how much was transferred). */
1811 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1813 /* Dispatch to the topmost target, not the flattened current_target.
1814 Memory accesses check target->to_has_(all_)memory, and the
1815 flattened target doesn't inherit those. */
1816 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1817 myaddr
, memaddr
, len
) == len
)
1820 return TARGET_XFER_E_IO
;
1823 /* Like target_read_memory, but specify explicitly that this is a read
1824 from the target's raw memory. That is, this read bypasses the
1825 dcache, breakpoint shadowing, etc. */
1828 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1830 /* See comment in target_read_memory about why the request starts at
1831 current_target.beneath. */
1832 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1833 myaddr
, memaddr
, len
) == len
)
1836 return TARGET_XFER_E_IO
;
1839 /* Like target_read_memory, but specify explicitly that this is a read from
1840 the target's stack. This may trigger different cache behavior. */
1843 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1845 /* See comment in target_read_memory about why the request starts at
1846 current_target.beneath. */
1847 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1848 myaddr
, memaddr
, len
) == len
)
1851 return TARGET_XFER_E_IO
;
1854 /* Like target_read_memory, but specify explicitly that this is a read from
1855 the target's code. This may trigger different cache behavior. */
1858 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1860 /* See comment in target_read_memory about why the request starts at
1861 current_target.beneath. */
1862 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1863 myaddr
, memaddr
, len
) == len
)
1866 return TARGET_XFER_E_IO
;
1869 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1870 Returns either 0 for success or TARGET_XFER_E_IO if any
1871 error occurs. If an error occurs, no guarantee is made about how
1872 much data got written. Callers that can deal with partial writes
1873 should call target_write. */
1876 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1878 /* See comment in target_read_memory about why the request starts at
1879 current_target.beneath. */
1880 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1881 myaddr
, memaddr
, len
) == len
)
1884 return TARGET_XFER_E_IO
;
1887 /* Write LEN bytes from MYADDR to target raw memory at address
1888 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1889 if any error occurs. If an error occurs, no guarantee is made
1890 about how much data got written. Callers that can deal with
1891 partial writes should call target_write. */
1894 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1896 /* See comment in target_read_memory about why the request starts at
1897 current_target.beneath. */
1898 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1899 myaddr
, memaddr
, len
) == len
)
1902 return TARGET_XFER_E_IO
;
1905 /* Fetch the target's memory map. */
1908 target_memory_map (void)
1910 VEC(mem_region_s
) *result
;
1911 struct mem_region
*last_one
, *this_one
;
1913 struct target_ops
*t
;
1916 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1918 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1919 if (t
->to_memory_map
!= NULL
)
1925 result
= t
->to_memory_map (t
);
1929 qsort (VEC_address (mem_region_s
, result
),
1930 VEC_length (mem_region_s
, result
),
1931 sizeof (struct mem_region
), mem_region_cmp
);
1933 /* Check that regions do not overlap. Simultaneously assign
1934 a numbering for the "mem" commands to use to refer to
1937 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1939 this_one
->number
= ix
;
1941 if (last_one
&& last_one
->hi
> this_one
->lo
)
1943 warning (_("Overlapping regions in memory map: ignoring"));
1944 VEC_free (mem_region_s
, result
);
1947 last_one
= this_one
;
1954 target_flash_erase (ULONGEST address
, LONGEST length
)
1956 struct target_ops
*t
;
1958 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1959 if (t
->to_flash_erase
!= NULL
)
1962 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1963 hex_string (address
), phex (length
, 0));
1964 t
->to_flash_erase (t
, address
, length
);
1972 target_flash_done (void)
1974 struct target_ops
*t
;
1976 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1977 if (t
->to_flash_done
!= NULL
)
1980 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1981 t
->to_flash_done (t
);
/* "show trust-readonly-sections" command callback.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
1997 /* More generic transfers. */
1999 static enum target_xfer_status
2000 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
2001 const char *annex
, gdb_byte
*readbuf
,
2002 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
2003 ULONGEST
*xfered_len
)
2005 if (object
== TARGET_OBJECT_MEMORY
2006 && ops
->deprecated_xfer_memory
!= NULL
)
2007 /* If available, fall back to the target's
2008 "deprecated_xfer_memory" method. */
2013 if (writebuf
!= NULL
)
2015 void *buffer
= xmalloc (len
);
2016 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
2018 memcpy (buffer
, writebuf
, len
);
2019 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
2020 1/*write*/, NULL
, ops
);
2021 do_cleanups (cleanup
);
2023 if (readbuf
!= NULL
)
2024 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
2025 0/*read*/, NULL
, ops
);
2028 *xfered_len
= (ULONGEST
) xfered
;
2029 return TARGET_XFER_E_IO
;
2031 else if (xfered
== 0 && errno
== 0)
2032 /* "deprecated_xfer_memory" uses 0, cross checked against
2033 ERRNO as one indication of an error. */
2034 return TARGET_XFER_EOF
;
2036 return TARGET_XFER_E_IO
;
2038 else if (ops
->beneath
!= NULL
)
2039 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
2040 readbuf
, writebuf
, offset
, len
,
2043 return TARGET_XFER_E_IO
;
2046 /* The xfer_partial handler for the topmost target. Unlike the default,
2047 it does not need to handle memory specially; it just passes all
2048 requests down the stack. */
2050 static enum target_xfer_status
2051 current_xfer_partial (struct target_ops
*ops
, enum target_object object
,
2052 const char *annex
, gdb_byte
*readbuf
,
2053 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
2054 ULONGEST
*xfered_len
)
2056 if (ops
->beneath
!= NULL
)
2057 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
2058 readbuf
, writebuf
, offset
, len
,
2061 return TARGET_XFER_E_IO
;
2064 /* Target vector read/write partial wrapper functions. */
2066 static enum target_xfer_status
2067 target_read_partial (struct target_ops
*ops
,
2068 enum target_object object
,
2069 const char *annex
, gdb_byte
*buf
,
2070 ULONGEST offset
, ULONGEST len
,
2071 ULONGEST
*xfered_len
)
2073 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
2077 static enum target_xfer_status
2078 target_write_partial (struct target_ops
*ops
,
2079 enum target_object object
,
2080 const char *annex
, const gdb_byte
*buf
,
2081 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
2083 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
2087 /* Wrappers to perform the full transfer. */
2089 /* For docs on target_read see target.h. */
2092 target_read (struct target_ops
*ops
,
2093 enum target_object object
,
2094 const char *annex
, gdb_byte
*buf
,
2095 ULONGEST offset
, LONGEST len
)
2099 while (xfered
< len
)
2101 ULONGEST xfered_len
;
2102 enum target_xfer_status status
;
2104 status
= target_read_partial (ops
, object
, annex
,
2105 (gdb_byte
*) buf
+ xfered
,
2106 offset
+ xfered
, len
- xfered
,
2109 /* Call an observer, notifying them of the xfer progress? */
2110 if (status
== TARGET_XFER_EOF
)
2112 else if (status
== TARGET_XFER_OK
)
2114 xfered
+= xfered_len
;
2124 /* Assuming that the entire [begin, end) range of memory cannot be
2125 read, try to read whatever subrange is possible to read.
2127 The function returns, in RESULT, either zero or one memory block.
2128 If there's a readable subrange at the beginning, it is completely
2129 read and returned. Any further readable subrange will not be read.
2130 Otherwise, if there's a readable subrange at the end, it will be
2131 completely read and returned. Any readable subranges before it
2132 (obviously, not starting at the beginning), will be ignored. In
2133 other cases -- either no readable subrange, or readable subrange(s)
2134 that is neither at the beginning, or end, nothing is returned.
2136 The purpose of this function is to handle a read across a boundary
2137 of accessible memory in a case when memory map is not available.
2138 The above restrictions are fine for this case, but will give
2139 incorrect results if the memory is 'patchy'. However, supporting
2140 'patchy' memory would require trying to read every single byte,
2141 and it seems unacceptable solution. Explicit memory map is
2142 recommended for this case -- and target_read_memory_robust will
2143 take care of reading multiple ranges then. */
2146 read_whatever_is_readable (struct target_ops
*ops
,
2147 ULONGEST begin
, ULONGEST end
,
2148 VEC(memory_read_result_s
) **result
)
2150 gdb_byte
*buf
= xmalloc (end
- begin
);
2151 ULONGEST current_begin
= begin
;
2152 ULONGEST current_end
= end
;
2154 memory_read_result_s r
;
2155 ULONGEST xfered_len
;
2157 /* If we previously failed to read 1 byte, nothing can be done here. */
2158 if (end
- begin
<= 1)
2164 /* Check that either first or the last byte is readable, and give up
2165 if not. This heuristic is meant to permit reading accessible memory
2166 at the boundary of accessible region. */
2167 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2168 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2173 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2174 buf
+ (end
-begin
) - 1, end
- 1, 1,
2175 &xfered_len
) == TARGET_XFER_OK
)
2186 /* Loop invariant is that the [current_begin, current_end) was previously
2187 found to be not readable as a whole.
2189 Note loop condition -- if the range has 1 byte, we can't divide the range
2190 so there's no point trying further. */
2191 while (current_end
- current_begin
> 1)
2193 ULONGEST first_half_begin
, first_half_end
;
2194 ULONGEST second_half_begin
, second_half_end
;
2196 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2200 first_half_begin
= current_begin
;
2201 first_half_end
= middle
;
2202 second_half_begin
= middle
;
2203 second_half_end
= current_end
;
2207 first_half_begin
= middle
;
2208 first_half_end
= current_end
;
2209 second_half_begin
= current_begin
;
2210 second_half_end
= middle
;
2213 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2214 buf
+ (first_half_begin
- begin
),
2216 first_half_end
- first_half_begin
);
2218 if (xfer
== first_half_end
- first_half_begin
)
2220 /* This half reads up fine. So, the error must be in the
2222 current_begin
= second_half_begin
;
2223 current_end
= second_half_end
;
2227 /* This half is not readable. Because we've tried one byte, we
2228 know some part of this half if actually redable. Go to the next
2229 iteration to divide again and try to read.
2231 We don't handle the other half, because this function only tries
2232 to read a single readable subrange. */
2233 current_begin
= first_half_begin
;
2234 current_end
= first_half_end
;
2240 /* The [begin, current_begin) range has been read. */
2242 r
.end
= current_begin
;
2247 /* The [current_end, end) range has been read. */
2248 LONGEST rlen
= end
- current_end
;
2250 r
.data
= xmalloc (rlen
);
2251 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2252 r
.begin
= current_end
;
2256 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2260 free_memory_read_result_vector (void *x
)
2262 VEC(memory_read_result_s
) *v
= x
;
2263 memory_read_result_s
*current
;
2266 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2268 xfree (current
->data
);
2270 VEC_free (memory_read_result_s
, v
);
2273 VEC(memory_read_result_s
) *
2274 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2276 VEC(memory_read_result_s
) *result
= 0;
2279 while (xfered
< len
)
2281 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2284 /* If there is no explicit region, a fake one should be created. */
2285 gdb_assert (region
);
2287 if (region
->hi
== 0)
2288 rlen
= len
- xfered
;
2290 rlen
= region
->hi
- offset
;
2292 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2294 /* Cannot read this region. Note that we can end up here only
2295 if the region is explicitly marked inaccessible, or
2296 'inaccessible-by-default' is in effect. */
2301 LONGEST to_read
= min (len
- xfered
, rlen
);
2302 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2304 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2305 (gdb_byte
*) buffer
,
2306 offset
+ xfered
, to_read
);
2307 /* Call an observer, notifying them of the xfer progress? */
2310 /* Got an error reading full chunk. See if maybe we can read
2313 read_whatever_is_readable (ops
, offset
+ xfered
,
2314 offset
+ xfered
+ to_read
, &result
);
2319 struct memory_read_result r
;
2321 r
.begin
= offset
+ xfered
;
2322 r
.end
= r
.begin
+ xfer
;
2323 VEC_safe_push (memory_read_result_s
, result
, &r
);
2333 /* An alternative to target_write with progress callbacks. */
2336 target_write_with_progress (struct target_ops
*ops
,
2337 enum target_object object
,
2338 const char *annex
, const gdb_byte
*buf
,
2339 ULONGEST offset
, LONGEST len
,
2340 void (*progress
) (ULONGEST
, void *), void *baton
)
2344 /* Give the progress callback a chance to set up. */
2346 (*progress
) (0, baton
);
2348 while (xfered
< len
)
2350 ULONGEST xfered_len
;
2351 enum target_xfer_status status
;
2353 status
= target_write_partial (ops
, object
, annex
,
2354 (gdb_byte
*) buf
+ xfered
,
2355 offset
+ xfered
, len
- xfered
,
2358 if (status
== TARGET_XFER_EOF
)
2360 if (TARGET_XFER_STATUS_ERROR_P (status
))
2363 gdb_assert (status
== TARGET_XFER_OK
);
2365 (*progress
) (xfered_len
, baton
);
2367 xfered
+= xfered_len
;
2373 /* For docs on target_write see target.h. */
2376 target_write (struct target_ops
*ops
,
2377 enum target_object object
,
2378 const char *annex
, const gdb_byte
*buf
,
2379 ULONGEST offset
, LONGEST len
)
2381 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2385 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2386 the size of the transferred data. PADDING additional bytes are
2387 available in *BUF_P. This is a helper function for
2388 target_read_alloc; see the declaration of that function for more
2392 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2393 const char *annex
, gdb_byte
**buf_p
, int padding
)
2395 size_t buf_alloc
, buf_pos
;
2398 /* This function does not have a length parameter; it reads the
2399 entire OBJECT). Also, it doesn't support objects fetched partly
2400 from one target and partly from another (in a different stratum,
2401 e.g. a core file and an executable). Both reasons make it
2402 unsuitable for reading memory. */
2403 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2405 /* Start by reading up to 4K at a time. The target will throttle
2406 this number down if necessary. */
2408 buf
= xmalloc (buf_alloc
);
2412 ULONGEST xfered_len
;
2413 enum target_xfer_status status
;
2415 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2416 buf_pos
, buf_alloc
- buf_pos
- padding
,
2419 if (status
== TARGET_XFER_EOF
)
2421 /* Read all there was. */
2428 else if (status
!= TARGET_XFER_OK
)
2430 /* An error occurred. */
2432 return TARGET_XFER_E_IO
;
2435 buf_pos
+= xfered_len
;
2437 /* If the buffer is filling up, expand it. */
2438 if (buf_alloc
< buf_pos
* 2)
2441 buf
= xrealloc (buf
, buf_alloc
);
2448 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2449 the size of the transferred data. See the declaration in "target.h"
2450 function for more information about the return value. */
2453 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2454 const char *annex
, gdb_byte
**buf_p
)
2456 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2459 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2460 returned as a string, allocated using xmalloc. If an error occurs
2461 or the transfer is unsupported, NULL is returned. Empty objects
2462 are returned as allocated but empty strings. A warning is issued
2463 if the result contains any embedded NUL bytes. */
2466 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2471 LONGEST i
, transferred
;
2473 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2474 bufstr
= (char *) buffer
;
2476 if (transferred
< 0)
2479 if (transferred
== 0)
2480 return xstrdup ("");
2482 bufstr
[transferred
] = 0;
2484 /* Check for embedded NUL bytes; but allow trailing NULs. */
2485 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2488 warning (_("target object %d, annex %s, "
2489 "contained unexpected null characters"),
2490 (int) object
, annex
? annex
: "(none)");
2497 /* Memory transfer methods. */
2500 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2503 /* This method is used to read from an alternate, non-current
2504 target. This read must bypass the overlay support (as symbols
2505 don't match this target), and GDB's internal cache (wrong cache
2506 for this target). */
2507 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2509 memory_error (TARGET_XFER_E_IO
, addr
);
2513 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2514 int len
, enum bfd_endian byte_order
)
2516 gdb_byte buf
[sizeof (ULONGEST
)];
2518 gdb_assert (len
<= sizeof (buf
));
2519 get_target_memory (ops
, addr
, buf
, len
);
2520 return extract_unsigned_integer (buf
, len
, byte_order
);
2526 forward_target_insert_breakpoint (struct target_ops
*ops
,
2527 struct gdbarch
*gdbarch
,
2528 struct bp_target_info
*bp_tgt
)
2530 for (; ops
!= NULL
; ops
= ops
->beneath
)
2531 if (ops
->to_insert_breakpoint
!= NULL
)
2532 return ops
->to_insert_breakpoint (ops
, gdbarch
, bp_tgt
);
2534 return memory_insert_breakpoint (ops
, gdbarch
, bp_tgt
);
2540 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2541 struct bp_target_info
*bp_tgt
)
2543 if (!may_insert_breakpoints
)
2545 warning (_("May not insert breakpoints"));
2549 return forward_target_insert_breakpoint (¤t_target
, gdbarch
, bp_tgt
);
2555 forward_target_remove_breakpoint (struct target_ops
*ops
,
2556 struct gdbarch
*gdbarch
,
2557 struct bp_target_info
*bp_tgt
)
2559 /* This is kind of a weird case to handle, but the permission might
2560 have been changed after breakpoints were inserted - in which case
2561 we should just take the user literally and assume that any
2562 breakpoints should be left in place. */
2563 if (!may_insert_breakpoints
)
2565 warning (_("May not remove breakpoints"));
2569 for (; ops
!= NULL
; ops
= ops
->beneath
)
2570 if (ops
->to_remove_breakpoint
!= NULL
)
2571 return ops
->to_remove_breakpoint (ops
, gdbarch
, bp_tgt
);
2573 return memory_remove_breakpoint (ops
, gdbarch
, bp_tgt
);
2579 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2580 struct bp_target_info
*bp_tgt
)
2582 return forward_target_remove_breakpoint (¤t_target
, gdbarch
, bp_tgt
);
2586 target_info (char *args
, int from_tty
)
2588 struct target_ops
*t
;
2589 int has_all_mem
= 0;
2591 if (symfile_objfile
!= NULL
)
2592 printf_unfiltered (_("Symbols from \"%s\".\n"),
2593 objfile_name (symfile_objfile
));
2595 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2597 if (!(*t
->to_has_memory
) (t
))
2600 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2603 printf_unfiltered (_("\tWhile running this, "
2604 "GDB does not access memory from...\n"));
2605 printf_unfiltered ("%s:\n", t
->to_longname
);
2606 (t
->to_files_info
) (t
);
2607 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2611 /* This function is called before any new inferior is created, e.g.
2612 by running a program, attaching, or connecting to a target.
2613 It cleans up any state from previous invocations which might
2614 change between runs. This is a subset of what target_preopen
2615 resets (things which might change between targets). */
2618 target_pre_inferior (int from_tty
)
2620 /* Clear out solib state. Otherwise the solib state of the previous
2621 inferior might have survived and is entirely wrong for the new
2622 target. This has been observed on GNU/Linux using glibc 2.3. How
2634 Cannot access memory at address 0xdeadbeef
2637 /* In some OSs, the shared library list is the same/global/shared
2638 across inferiors. If code is shared between processes, so are
2639 memory regions and features. */
2640 if (!gdbarch_has_global_solist (target_gdbarch ()))
2642 no_shared_libraries (NULL
, from_tty
);
2644 invalidate_target_mem_regions ();
2646 target_clear_description ();
2649 agent_capability_invalidate ();
2652 /* Callback for iterate_over_inferiors. Gets rid of the given
2656 dispose_inferior (struct inferior
*inf
, void *args
)
2658 struct thread_info
*thread
;
2660 thread
= any_thread_of_process (inf
->pid
);
2663 switch_to_thread (thread
->ptid
);
2665 /* Core inferiors actually should be detached, not killed. */
2666 if (target_has_execution
)
2669 target_detach (NULL
, 0);
2675 /* This is to be called by the open routine before it does
2679 target_preopen (int from_tty
)
2683 if (have_inferiors ())
2686 || !have_live_inferiors ()
2687 || query (_("A program is being debugged already. Kill it? ")))
2688 iterate_over_inferiors (dispose_inferior
, NULL
);
2690 error (_("Program not killed."));
2693 /* Calling target_kill may remove the target from the stack. But if
2694 it doesn't (which seems like a win for UDI), remove it now. */
2695 /* Leave the exec target, though. The user may be switching from a
2696 live process to a core of the same program. */
2697 pop_all_targets_above (file_stratum
);
2699 target_pre_inferior (from_tty
);
2702 /* Detach a target after doing deferred register stores. */
2705 target_detach (const char *args
, int from_tty
)
2707 struct target_ops
* t
;
2709 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2710 /* Don't remove global breakpoints here. They're removed on
2711 disconnection from the target. */
2714 /* If we're in breakpoints-always-inserted mode, have to remove
2715 them before detaching. */
2716 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2718 prepare_for_detach ();
2720 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2722 if (t
->to_detach
!= NULL
)
2724 t
->to_detach (t
, args
, from_tty
);
2726 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2732 internal_error (__FILE__
, __LINE__
, _("could not find a target to detach"));
2736 target_disconnect (char *args
, int from_tty
)
2738 struct target_ops
*t
;
2740 /* If we're in breakpoints-always-inserted mode or if breakpoints
2741 are global across processes, we have to remove them before
2743 remove_breakpoints ();
2745 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2746 if (t
->to_disconnect
!= NULL
)
2749 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2751 t
->to_disconnect (t
, args
, from_tty
);
2759 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2761 struct target_ops
*t
;
2763 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2765 if (t
->to_wait
!= NULL
)
2767 ptid_t retval
= (*t
->to_wait
) (t
, ptid
, status
, options
);
2771 char *status_string
;
2772 char *options_string
;
2774 status_string
= target_waitstatus_to_string (status
);
2775 options_string
= target_options_to_string (options
);
2776 fprintf_unfiltered (gdb_stdlog
,
2777 "target_wait (%d, status, options={%s})"
2779 ptid_get_pid (ptid
), options_string
,
2780 ptid_get_pid (retval
), status_string
);
2781 xfree (status_string
);
2782 xfree (options_string
);
2793 target_pid_to_str (ptid_t ptid
)
2795 struct target_ops
*t
;
2797 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2799 if (t
->to_pid_to_str
!= NULL
)
2800 return (*t
->to_pid_to_str
) (t
, ptid
);
2803 return normal_pid_to_str (ptid
);
2807 target_thread_name (struct thread_info
*info
)
2809 struct target_ops
*t
;
2811 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2813 if (t
->to_thread_name
!= NULL
)
2814 return (*t
->to_thread_name
) (info
);
2821 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2823 struct target_ops
*t
;
2825 target_dcache_invalidate ();
2827 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2829 if (t
->to_resume
!= NULL
)
2831 t
->to_resume (t
, ptid
, step
, signal
);
2833 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2834 ptid_get_pid (ptid
),
2835 step
? "step" : "continue",
2836 gdb_signal_to_name (signal
));
2838 registers_changed_ptid (ptid
);
2839 set_executing (ptid
, 1);
2840 set_running (ptid
, 1);
2841 clear_inline_frame_state (ptid
);
2850 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2852 struct target_ops
*t
;
2854 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2856 if (t
->to_pass_signals
!= NULL
)
2862 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2865 for (i
= 0; i
< numsigs
; i
++)
2866 if (pass_signals
[i
])
2867 fprintf_unfiltered (gdb_stdlog
, " %s",
2868 gdb_signal_to_name (i
));
2870 fprintf_unfiltered (gdb_stdlog
, " })\n");
2873 (*t
->to_pass_signals
) (numsigs
, pass_signals
);
2880 target_program_signals (int numsigs
, unsigned char *program_signals
)
2882 struct target_ops
*t
;
2884 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2886 if (t
->to_program_signals
!= NULL
)
2892 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2895 for (i
= 0; i
< numsigs
; i
++)
2896 if (program_signals
[i
])
2897 fprintf_unfiltered (gdb_stdlog
, " %s",
2898 gdb_signal_to_name (i
));
2900 fprintf_unfiltered (gdb_stdlog
, " })\n");
2903 (*t
->to_program_signals
) (numsigs
, program_signals
);
2909 /* Look through the list of possible targets for a target that can
2913 target_follow_fork (int follow_child
, int detach_fork
)
2915 struct target_ops
*t
;
2917 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2919 if (t
->to_follow_fork
!= NULL
)
2921 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2924 fprintf_unfiltered (gdb_stdlog
,
2925 "target_follow_fork (%d, %d) = %d\n",
2926 follow_child
, detach_fork
, retval
);
2931 /* Some target returned a fork event, but did not know how to follow it. */
2932 internal_error (__FILE__
, __LINE__
,
2933 _("could not find a target to follow fork"));
2937 target_mourn_inferior (void)
2939 struct target_ops
*t
;
2941 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2943 if (t
->to_mourn_inferior
!= NULL
)
2945 t
->to_mourn_inferior (t
);
2947 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2949 /* We no longer need to keep handles on any of the object files.
2950 Make sure to release them to avoid unnecessarily locking any
2951 of them while we're not actually debugging. */
2952 bfd_cache_close_all ();
2958 internal_error (__FILE__
, __LINE__
,
2959 _("could not find a target to follow mourn inferior"));
2962 /* Look for a target which can describe architectural features, starting
2963 from TARGET. If we find one, return its description. */
2965 const struct target_desc
*
2966 target_read_description (struct target_ops
*target
)
2968 struct target_ops
*t
;
2970 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2971 if (t
->to_read_description
!= NULL
)
2973 const struct target_desc
*tdesc
;
2975 tdesc
= t
->to_read_description (t
);
2983 /* The default implementation of to_search_memory.
2984 This implements a basic search of memory, reading target memory and
2985 performing the search here (as opposed to performing the search in on the
2986 target side with, for example, gdbserver). */
2989 simple_search_memory (struct target_ops
*ops
,
2990 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2991 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2992 CORE_ADDR
*found_addrp
)
2994 /* NOTE: also defined in find.c testcase. */
2995 #define SEARCH_CHUNK_SIZE 16000
2996 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2997 /* Buffer to hold memory contents for searching. */
2998 gdb_byte
*search_buf
;
2999 unsigned search_buf_size
;
3000 struct cleanup
*old_cleanups
;
3002 search_buf_size
= chunk_size
+ pattern_len
- 1;
3004 /* No point in trying to allocate a buffer larger than the search space. */
3005 if (search_space_len
< search_buf_size
)
3006 search_buf_size
= search_space_len
;
3008 search_buf
= malloc (search_buf_size
);
3009 if (search_buf
== NULL
)
3010 error (_("Unable to allocate memory to perform the search."));
3011 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
3013 /* Prime the search buffer. */
3015 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
3016 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
3018 warning (_("Unable to access %s bytes of target "
3019 "memory at %s, halting search."),
3020 pulongest (search_buf_size
), hex_string (start_addr
));
3021 do_cleanups (old_cleanups
);
3025 /* Perform the search.
3027 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
3028 When we've scanned N bytes we copy the trailing bytes to the start and
3029 read in another N bytes. */
3031 while (search_space_len
>= pattern_len
)
3033 gdb_byte
*found_ptr
;
3034 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
3036 found_ptr
= memmem (search_buf
, nr_search_bytes
,
3037 pattern
, pattern_len
);
3039 if (found_ptr
!= NULL
)
3041 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
3043 *found_addrp
= found_addr
;
3044 do_cleanups (old_cleanups
);
3048 /* Not found in this chunk, skip to next chunk. */
3050 /* Don't let search_space_len wrap here, it's unsigned. */
3051 if (search_space_len
>= chunk_size
)
3052 search_space_len
-= chunk_size
;
3054 search_space_len
= 0;
3056 if (search_space_len
>= pattern_len
)
3058 unsigned keep_len
= search_buf_size
- chunk_size
;
3059 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
3062 /* Copy the trailing part of the previous iteration to the front
3063 of the buffer for the next iteration. */
3064 gdb_assert (keep_len
== pattern_len
- 1);
3065 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
3067 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
3069 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
3070 search_buf
+ keep_len
, read_addr
,
3071 nr_to_read
) != nr_to_read
)
3073 warning (_("Unable to access %s bytes of target "
3074 "memory at %s, halting search."),
3075 plongest (nr_to_read
),
3076 hex_string (read_addr
));
3077 do_cleanups (old_cleanups
);
3081 start_addr
+= chunk_size
;
3087 do_cleanups (old_cleanups
);
3091 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3092 sequence of bytes in PATTERN with length PATTERN_LEN.
3094 The result is 1 if found, 0 if not found, and -1 if there was an error
3095 requiring halting of the search (e.g. memory read error).
3096 If the pattern is found the address is recorded in FOUND_ADDRP. */
3099 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
3100 const gdb_byte
*pattern
, ULONGEST pattern_len
,
3101 CORE_ADDR
*found_addrp
)
3103 struct target_ops
*t
;
3106 /* We don't use INHERIT to set current_target.to_search_memory,
3107 so we have to scan the target stack and handle targetdebug
3111 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
3112 hex_string (start_addr
));
3114 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3115 if (t
->to_search_memory
!= NULL
)
3120 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
3121 pattern
, pattern_len
, found_addrp
);
3125 /* If a special version of to_search_memory isn't available, use the
3127 found
= simple_search_memory (current_target
.beneath
,
3128 start_addr
, search_space_len
,
3129 pattern
, pattern_len
, found_addrp
);
3133 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
3138 /* Look through the currently pushed targets. If none of them will
3139 be able to restart the currently running process, issue an error
3143 target_require_runnable (void)
3145 struct target_ops
*t
;
3147 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
3149 /* If this target knows how to create a new program, then
3150 assume we will still be able to after killing the current
3151 one. Either killing and mourning will not pop T, or else
3152 find_default_run_target will find it again. */
3153 if (t
->to_create_inferior
!= NULL
)
3156 /* Do not worry about thread_stratum targets that can not
3157 create inferiors. Assume they will be pushed again if
3158 necessary, and continue to the process_stratum. */
3159 if (t
->to_stratum
== thread_stratum
3160 || t
->to_stratum
== arch_stratum
)
3163 error (_("The \"%s\" target does not support \"run\". "
3164 "Try \"help target\" or \"continue\"."),
3168 /* This function is only called if the target is running. In that
3169 case there should have been a process_stratum target and it
3170 should either know how to create inferiors, or not... */
3171 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3174 /* Look through the list of possible targets for a target that can
3175 execute a run or attach command without any other data. This is
3176 used to locate the default process stratum.
3178 If DO_MESG is not NULL, the result is always valid (error() is
3179 called for errors); else, return NULL on error. */
3181 static struct target_ops
*
3182 find_default_run_target (char *do_mesg
)
3184 struct target_ops
**t
;
3185 struct target_ops
*runable
= NULL
;
3190 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3193 if ((*t
)->to_can_run
&& target_can_run (*t
))
3203 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3212 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3214 struct target_ops
*t
;
3216 t
= find_default_run_target ("attach");
3217 (t
->to_attach
) (t
, args
, from_tty
);
3222 find_default_create_inferior (struct target_ops
*ops
,
3223 char *exec_file
, char *allargs
, char **env
,
3226 struct target_ops
*t
;
3228 t
= find_default_run_target ("run");
3229 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3234 find_default_can_async_p (void)
3236 struct target_ops
*t
;
3238 /* This may be called before the target is pushed on the stack;
3239 look for the default process stratum. If there's none, gdb isn't
3240 configured with a native debugger, and target remote isn't
3242 t
= find_default_run_target (NULL
);
3243 if (t
&& t
->to_can_async_p
)
3244 return (t
->to_can_async_p
) ();
3249 find_default_is_async_p (void)
3251 struct target_ops
*t
;
3253 /* This may be called before the target is pushed on the stack;
3254 look for the default process stratum. If there's none, gdb isn't
3255 configured with a native debugger, and target remote isn't
3257 t
= find_default_run_target (NULL
);
3258 if (t
&& t
->to_is_async_p
)
3259 return (t
->to_is_async_p
) ();
3264 find_default_supports_non_stop (void)
3266 struct target_ops
*t
;
3268 t
= find_default_run_target (NULL
);
3269 if (t
&& t
->to_supports_non_stop
)
3270 return (t
->to_supports_non_stop
) ();
3275 target_supports_non_stop (void)
3277 struct target_ops
*t
;
3279 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3280 if (t
->to_supports_non_stop
)
3281 return t
->to_supports_non_stop ();
3286 /* Implement the "info proc" command. */
3289 target_info_proc (char *args
, enum info_proc_what what
)
3291 struct target_ops
*t
;
3293 /* If we're already connected to something that can get us OS
3294 related data, use it. Otherwise, try using the native
3296 if (current_target
.to_stratum
>= process_stratum
)
3297 t
= current_target
.beneath
;
3299 t
= find_default_run_target (NULL
);
3301 for (; t
!= NULL
; t
= t
->beneath
)
3303 if (t
->to_info_proc
!= NULL
)
3305 t
->to_info_proc (t
, args
, what
);
3308 fprintf_unfiltered (gdb_stdlog
,
3309 "target_info_proc (\"%s\", %d)\n", args
, what
);
3319 find_default_supports_disable_randomization (void)
3321 struct target_ops
*t
;
3323 t
= find_default_run_target (NULL
);
3324 if (t
&& t
->to_supports_disable_randomization
)
3325 return (t
->to_supports_disable_randomization
) ();
3330 target_supports_disable_randomization (void)
3332 struct target_ops
*t
;
3334 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3335 if (t
->to_supports_disable_randomization
)
3336 return t
->to_supports_disable_randomization ();
3342 target_get_osdata (const char *type
)
3344 struct target_ops
*t
;
3346 /* If we're already connected to something that can get us OS
3347 related data, use it. Otherwise, try using the native
3349 if (current_target
.to_stratum
>= process_stratum
)
3350 t
= current_target
.beneath
;
3352 t
= find_default_run_target ("get OS data");
3357 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3360 /* Determine the current address space of thread PTID. */
3362 struct address_space
*
3363 target_thread_address_space (ptid_t ptid
)
3365 struct address_space
*aspace
;
3366 struct inferior
*inf
;
3367 struct target_ops
*t
;
3369 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3371 if (t
->to_thread_address_space
!= NULL
)
3373 aspace
= t
->to_thread_address_space (t
, ptid
);
3374 gdb_assert (aspace
);
3377 fprintf_unfiltered (gdb_stdlog
,
3378 "target_thread_address_space (%s) = %d\n",
3379 target_pid_to_str (ptid
),
3380 address_space_num (aspace
));
3385 /* Fall-back to the "main" address space of the inferior. */
3386 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3388 if (inf
== NULL
|| inf
->aspace
== NULL
)
3389 internal_error (__FILE__
, __LINE__
,
3390 _("Can't determine the current "
3391 "address space of thread %s\n"),
3392 target_pid_to_str (ptid
));
3398 /* Target file operations. */
3400 static struct target_ops
*
3401 default_fileio_target (void)
3403 /* If we're already connected to something that can perform
3404 file I/O, use it. Otherwise, try using the native target. */
3405 if (current_target
.to_stratum
>= process_stratum
)
3406 return current_target
.beneath
;
3408 return find_default_run_target ("file I/O");
3411 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3412 target file descriptor, or -1 if an error occurs (and set
3415 target_fileio_open (const char *filename
, int flags
, int mode
,
3418 struct target_ops
*t
;
3420 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3422 if (t
->to_fileio_open
!= NULL
)
3424 int fd
= t
->to_fileio_open (filename
, flags
, mode
, target_errno
);
3427 fprintf_unfiltered (gdb_stdlog
,
3428 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3429 filename
, flags
, mode
,
3430 fd
, fd
!= -1 ? 0 : *target_errno
);
3435 *target_errno
= FILEIO_ENOSYS
;
3439 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3440 Return the number of bytes written, or -1 if an error occurs
3441 (and set *TARGET_ERRNO). */
3443 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3444 ULONGEST offset
, int *target_errno
)
3446 struct target_ops
*t
;
3448 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3450 if (t
->to_fileio_pwrite
!= NULL
)
3452 int ret
= t
->to_fileio_pwrite (fd
, write_buf
, len
, offset
,
3456 fprintf_unfiltered (gdb_stdlog
,
3457 "target_fileio_pwrite (%d,...,%d,%s) "
3459 fd
, len
, pulongest (offset
),
3460 ret
, ret
!= -1 ? 0 : *target_errno
);
3465 *target_errno
= FILEIO_ENOSYS
;
3469 /* Read up to LEN bytes FD on the target into READ_BUF.
3470 Return the number of bytes read, or -1 if an error occurs
3471 (and set *TARGET_ERRNO). */
3473 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3474 ULONGEST offset
, int *target_errno
)
3476 struct target_ops
*t
;
3478 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3480 if (t
->to_fileio_pread
!= NULL
)
3482 int ret
= t
->to_fileio_pread (fd
, read_buf
, len
, offset
,
3486 fprintf_unfiltered (gdb_stdlog
,
3487 "target_fileio_pread (%d,...,%d,%s) "
3489 fd
, len
, pulongest (offset
),
3490 ret
, ret
!= -1 ? 0 : *target_errno
);
3495 *target_errno
= FILEIO_ENOSYS
;
3499 /* Close FD on the target. Return 0, or -1 if an error occurs
3500 (and set *TARGET_ERRNO). */
3502 target_fileio_close (int fd
, int *target_errno
)
3504 struct target_ops
*t
;
3506 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3508 if (t
->to_fileio_close
!= NULL
)
3510 int ret
= t
->to_fileio_close (fd
, target_errno
);
3513 fprintf_unfiltered (gdb_stdlog
,
3514 "target_fileio_close (%d) = %d (%d)\n",
3515 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3520 *target_errno
= FILEIO_ENOSYS
;
3524 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3525 occurs (and set *TARGET_ERRNO). */
3527 target_fileio_unlink (const char *filename
, int *target_errno
)
3529 struct target_ops
*t
;
3531 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3533 if (t
->to_fileio_unlink
!= NULL
)
3535 int ret
= t
->to_fileio_unlink (filename
, target_errno
);
3538 fprintf_unfiltered (gdb_stdlog
,
3539 "target_fileio_unlink (%s) = %d (%d)\n",
3540 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3545 *target_errno
= FILEIO_ENOSYS
;
3549 /* Read value of symbolic link FILENAME on the target. Return a
3550 null-terminated string allocated via xmalloc, or NULL if an error
3551 occurs (and set *TARGET_ERRNO). */
3553 target_fileio_readlink (const char *filename
, int *target_errno
)
3555 struct target_ops
*t
;
3557 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3559 if (t
->to_fileio_readlink
!= NULL
)
3561 char *ret
= t
->to_fileio_readlink (filename
, target_errno
);
3564 fprintf_unfiltered (gdb_stdlog
,
3565 "target_fileio_readlink (%s) = %s (%d)\n",
3566 filename
, ret
? ret
: "(nil)",
3567 ret
? 0 : *target_errno
);
3572 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3585 /* Read target file FILENAME. Store the result in *BUF_P and
3586 return the size of the transferred data. PADDING additional bytes are
3587 available in *BUF_P. This is a helper function for
3588 target_fileio_read_alloc; see the declaration of that function for more
3592 target_fileio_read_alloc_1 (const char *filename
,
3593 gdb_byte
**buf_p
, int padding
)
3595 struct cleanup
*close_cleanup
;
3596 size_t buf_alloc
, buf_pos
;
3602 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3606 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3608 /* Start by reading up to 4K at a time. The target will throttle
3609 this number down if necessary. */
3611 buf
= xmalloc (buf_alloc
);
3615 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3616 buf_alloc
- buf_pos
- padding
, buf_pos
,
3620 /* An error occurred. */
3621 do_cleanups (close_cleanup
);
3627 /* Read all there was. */
3628 do_cleanups (close_cleanup
);
3638 /* If the buffer is filling up, expand it. */
3639 if (buf_alloc
< buf_pos
* 2)
3642 buf
= xrealloc (buf
, buf_alloc
);
3649 /* Read target file FILENAME. Store the result in *BUF_P and return
3650 the size of the transferred data. See the declaration in "target.h"
3651 function for more information about the return value. */
3654 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3656 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3659 /* Read target file FILENAME. The result is NUL-terminated and
3660 returned as a string, allocated using xmalloc. If an error occurs
3661 or the transfer is unsupported, NULL is returned. Empty objects
3662 are returned as allocated but empty strings. A warning is issued
3663 if the result contains any embedded NUL bytes. */
3666 target_fileio_read_stralloc (const char *filename
)
3670 LONGEST i
, transferred
;
3672 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3673 bufstr
= (char *) buffer
;
3675 if (transferred
< 0)
3678 if (transferred
== 0)
3679 return xstrdup ("");
3681 bufstr
[transferred
] = 0;
3683 /* Check for embedded NUL bytes; but allow trailing NULs. */
3684 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3687 warning (_("target file %s "
3688 "contained unexpected null characters"),
3698 default_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
3700 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3704 default_watchpoint_addr_within_range (struct target_ops
*target
,
3706 CORE_ADDR start
, int length
)
3708 return addr
>= start
&& addr
< start
+ length
;
3711 static struct gdbarch
*
3712 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3714 return target_gdbarch ();
/* Trivial default target-vector callback: always -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3742 * Find the next target down the stack from the specified target.
3746 find_target_beneath (struct target_ops
*t
)
3752 /* The inferior process has died. Long live the inferior! */
3755 generic_mourn_inferior (void)
3759 ptid
= inferior_ptid
;
3760 inferior_ptid
= null_ptid
;
3762 /* Mark breakpoints uninserted in case something tries to delete a
3763 breakpoint while we delete the inferior's threads (which would
3764 fail, since the inferior is long gone). */
3765 mark_breakpoints_out ();
3767 if (!ptid_equal (ptid
, null_ptid
))
3769 int pid
= ptid_get_pid (ptid
);
3770 exit_inferior (pid
);
3773 /* Note this wipes step-resume breakpoints, so needs to be done
3774 after exit_inferior, which ends up referencing the step-resume
3775 breakpoints through clear_thread_inferior_resources. */
3776 breakpoint_init_inferior (inf_exited
);
3778 registers_changed ();
3780 reopen_exec_file ();
3781 reinit_frame_cache ();
3783 if (deprecated_detach_hook
)
3784 deprecated_detach_hook ();
3787 /* Convert a normal process ID to a string. Returns the string in a
3791 normal_pid_to_str (ptid_t ptid
)
3793 static char buf
[32];
3795 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3800 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3802 return normal_pid_to_str (ptid
);
3805 /* Error-catcher for target_find_memory_regions. */
3807 dummy_find_memory_regions (find_memory_region_ftype ignore1
, void *ignore2
)
3809 error (_("Command not implemented for this target."));
3813 /* Error-catcher for target_make_corefile_notes. */
3815 dummy_make_corefile_notes (bfd
*ignore1
, int *ignore2
)
3817 error (_("Command not implemented for this target."));
3821 /* Error-catcher for target_get_bookmark. */
3823 dummy_get_bookmark (char *ignore1
, int ignore2
)
3829 /* Error-catcher for target_goto_bookmark. */
3831 dummy_goto_bookmark (gdb_byte
*ignore
, int from_tty
)
3836 /* Set up the handful of non-empty slots needed by the dummy target
3840 init_dummy_target (void)
3842 dummy_target
.to_shortname
= "None";
3843 dummy_target
.to_longname
= "None";
3844 dummy_target
.to_doc
= "";
3845 dummy_target
.to_attach
= find_default_attach
;
3846 dummy_target
.to_detach
=
3847 (void (*)(struct target_ops
*, const char *, int))target_ignore
;
3848 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3849 dummy_target
.to_can_async_p
= find_default_can_async_p
;
3850 dummy_target
.to_is_async_p
= find_default_is_async_p
;
3851 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3852 dummy_target
.to_supports_disable_randomization
3853 = find_default_supports_disable_randomization
;
3854 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3855 dummy_target
.to_stratum
= dummy_stratum
;
3856 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3857 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3858 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3859 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3860 dummy_target
.to_xfer_partial
= default_xfer_partial
;
3861 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3862 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3863 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3864 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3865 dummy_target
.to_has_execution
3866 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3867 dummy_target
.to_stopped_by_watchpoint
= return_zero
;
3868 dummy_target
.to_stopped_data_address
=
3869 (int (*) (struct target_ops
*, CORE_ADDR
*)) return_zero
;
3870 dummy_target
.to_magic
= OPS_MAGIC
;
3874 debug_to_open (char *args
, int from_tty
)
3876 debug_target
.to_open (args
, from_tty
);
3878 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3882 target_close (struct target_ops
*targ
)
3884 gdb_assert (!target_is_pushed (targ
));
3886 if (targ
->to_xclose
!= NULL
)
3887 targ
->to_xclose (targ
);
3888 else if (targ
->to_close
!= NULL
)
3892 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3896 target_attach (char *args
, int from_tty
)
3898 struct target_ops
*t
;
3900 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3902 if (t
->to_attach
!= NULL
)
3904 t
->to_attach (t
, args
, from_tty
);
3906 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3912 internal_error (__FILE__
, __LINE__
,
3913 _("could not find a target to attach"));
3917 target_thread_alive (ptid_t ptid
)
3919 struct target_ops
*t
;
3921 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3923 if (t
->to_thread_alive
!= NULL
)
3927 retval
= t
->to_thread_alive (t
, ptid
);
3929 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3930 ptid_get_pid (ptid
), retval
);
3940 target_find_new_threads (void)
3942 struct target_ops
*t
;
3944 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3946 if (t
->to_find_new_threads
!= NULL
)
3948 t
->to_find_new_threads (t
);
3950 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3958 target_stop (ptid_t ptid
)
3962 warning (_("May not interrupt or stop the target, ignoring attempt"));
3966 (*current_target
.to_stop
) (ptid
);
3970 debug_to_post_attach (int pid
)
3972 debug_target
.to_post_attach (pid
);
3974 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3977 /* Concatenate ELEM to LIST, a comma separate list, and return the
3978 result. The LIST incoming argument is released. */
3981 str_comma_list_concat_elem (char *list
, const char *elem
)
3984 return xstrdup (elem
);
3986 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
3989 /* Helper for target_options_to_string. If OPT is present in
3990 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3991 Returns the new resulting string. OPT is removed from
3995 do_option (int *target_options
, char *ret
,
3996 int opt
, char *opt_str
)
3998 if ((*target_options
& opt
) != 0)
4000 ret
= str_comma_list_concat_elem (ret
, opt_str
);
4001 *target_options
&= ~opt
;
4008 target_options_to_string (int target_options
)
4012 #define DO_TARG_OPTION(OPT) \
4013 ret = do_option (&target_options, ret, OPT, #OPT)
4015 DO_TARG_OPTION (TARGET_WNOHANG
);
4017 if (target_options
!= 0)
4018 ret
= str_comma_list_concat_elem (ret
, "unknown???");
4026 debug_print_register (const char * func
,
4027 struct regcache
*regcache
, int regno
)
4029 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
4031 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
4032 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
4033 && gdbarch_register_name (gdbarch
, regno
) != NULL
4034 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
4035 fprintf_unfiltered (gdb_stdlog
, "(%s)",
4036 gdbarch_register_name (gdbarch
, regno
));
4038 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
4039 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
4041 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4042 int i
, size
= register_size (gdbarch
, regno
);
4043 gdb_byte buf
[MAX_REGISTER_SIZE
];
4045 regcache_raw_collect (regcache
, regno
, buf
);
4046 fprintf_unfiltered (gdb_stdlog
, " = ");
4047 for (i
= 0; i
< size
; i
++)
4049 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
4051 if (size
<= sizeof (LONGEST
))
4053 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
4055 fprintf_unfiltered (gdb_stdlog
, " %s %s",
4056 core_addr_to_string_nz (val
), plongest (val
));
4059 fprintf_unfiltered (gdb_stdlog
, "\n");
4063 target_fetch_registers (struct regcache
*regcache
, int regno
)
4065 struct target_ops
*t
;
4067 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4069 if (t
->to_fetch_registers
!= NULL
)
4071 t
->to_fetch_registers (t
, regcache
, regno
);
4073 debug_print_register ("target_fetch_registers", regcache
, regno
);
4080 target_store_registers (struct regcache
*regcache
, int regno
)
4082 struct target_ops
*t
;
4084 if (!may_write_registers
)
4085 error (_("Writing to registers is not allowed (regno %d)"), regno
);
4087 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4089 if (t
->to_store_registers
!= NULL
)
4091 t
->to_store_registers (t
, regcache
, regno
);
4094 debug_print_register ("target_store_registers", regcache
, regno
);
4104 target_core_of_thread (ptid_t ptid
)
4106 struct target_ops
*t
;
4108 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4110 if (t
->to_core_of_thread
!= NULL
)
4112 int retval
= t
->to_core_of_thread (t
, ptid
);
4115 fprintf_unfiltered (gdb_stdlog
,
4116 "target_core_of_thread (%d) = %d\n",
4117 ptid_get_pid (ptid
), retval
);
4126 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
4128 struct target_ops
*t
;
4130 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4132 if (t
->to_verify_memory
!= NULL
)
4134 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
4137 fprintf_unfiltered (gdb_stdlog
,
4138 "target_verify_memory (%s, %s) = %d\n",
4139 paddress (target_gdbarch (), memaddr
),
4149 /* The documentation for this function is in its prototype declaration in
4153 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4155 struct target_ops
*t
;
4157 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4158 if (t
->to_insert_mask_watchpoint
!= NULL
)
4162 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
4165 fprintf_unfiltered (gdb_stdlog
, "\
4166 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4167 core_addr_to_string (addr
),
4168 core_addr_to_string (mask
), rw
, ret
);
4176 /* The documentation for this function is in its prototype declaration in
4180 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4182 struct target_ops
*t
;
4184 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4185 if (t
->to_remove_mask_watchpoint
!= NULL
)
4189 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4192 fprintf_unfiltered (gdb_stdlog
, "\
4193 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4194 core_addr_to_string (addr
),
4195 core_addr_to_string (mask
), rw
, ret
);
4203 /* The documentation for this function is in its prototype declaration
4207 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4209 struct target_ops
*t
;
4211 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4212 if (t
->to_masked_watch_num_registers
!= NULL
)
4213 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4218 /* The documentation for this function is in its prototype declaration
4222 target_ranged_break_num_registers (void)
4224 struct target_ops
*t
;
4226 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4227 if (t
->to_ranged_break_num_registers
!= NULL
)
4228 return t
->to_ranged_break_num_registers (t
);
4236 target_supports_btrace (void)
4238 struct target_ops
*t
;
4240 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4241 if (t
->to_supports_btrace
!= NULL
)
4242 return t
->to_supports_btrace ();
4249 struct btrace_target_info
*
4250 target_enable_btrace (ptid_t ptid
)
4252 struct target_ops
*t
;
4254 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4255 if (t
->to_enable_btrace
!= NULL
)
4256 return t
->to_enable_btrace (ptid
);
4265 target_disable_btrace (struct btrace_target_info
*btinfo
)
4267 struct target_ops
*t
;
4269 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4270 if (t
->to_disable_btrace
!= NULL
)
4272 t
->to_disable_btrace (btinfo
);
4282 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4284 struct target_ops
*t
;
4286 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4287 if (t
->to_teardown_btrace
!= NULL
)
4289 t
->to_teardown_btrace (btinfo
);
4299 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4300 struct btrace_target_info
*btinfo
,
4301 enum btrace_read_type type
)
4303 struct target_ops
*t
;
4305 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4306 if (t
->to_read_btrace
!= NULL
)
4307 return t
->to_read_btrace (btrace
, btinfo
, type
);
4310 return BTRACE_ERR_NOT_SUPPORTED
;
4316 target_stop_recording (void)
4318 struct target_ops
*t
;
4320 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4321 if (t
->to_stop_recording
!= NULL
)
4323 t
->to_stop_recording ();
4327 /* This is optional. */
4333 target_info_record (void)
4335 struct target_ops
*t
;
4337 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4338 if (t
->to_info_record
!= NULL
)
4340 t
->to_info_record ();
4350 target_save_record (const char *filename
)
4352 struct target_ops
*t
;
4354 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4355 if (t
->to_save_record
!= NULL
)
4357 t
->to_save_record (filename
);
4367 target_supports_delete_record (void)
4369 struct target_ops
*t
;
4371 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4372 if (t
->to_delete_record
!= NULL
)
4381 target_delete_record (void)
4383 struct target_ops
*t
;
4385 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4386 if (t
->to_delete_record
!= NULL
)
4388 t
->to_delete_record ();
4398 target_record_is_replaying (void)
4400 struct target_ops
*t
;
4402 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4403 if (t
->to_record_is_replaying
!= NULL
)
4404 return t
->to_record_is_replaying ();
4412 target_goto_record_begin (void)
4414 struct target_ops
*t
;
4416 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4417 if (t
->to_goto_record_begin
!= NULL
)
4419 t
->to_goto_record_begin ();
4429 target_goto_record_end (void)
4431 struct target_ops
*t
;
4433 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4434 if (t
->to_goto_record_end
!= NULL
)
4436 t
->to_goto_record_end ();
4446 target_goto_record (ULONGEST insn
)
4448 struct target_ops
*t
;
4450 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4451 if (t
->to_goto_record
!= NULL
)
4453 t
->to_goto_record (insn
);
4463 target_insn_history (int size
, int flags
)
4465 struct target_ops
*t
;
4467 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4468 if (t
->to_insn_history
!= NULL
)
4470 t
->to_insn_history (size
, flags
);
4480 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4482 struct target_ops
*t
;
4484 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4485 if (t
->to_insn_history_from
!= NULL
)
4487 t
->to_insn_history_from (from
, size
, flags
);
4497 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4499 struct target_ops
*t
;
4501 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4502 if (t
->to_insn_history_range
!= NULL
)
4504 t
->to_insn_history_range (begin
, end
, flags
);
4514 target_call_history (int size
, int flags
)
4516 struct target_ops
*t
;
4518 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4519 if (t
->to_call_history
!= NULL
)
4521 t
->to_call_history (size
, flags
);
4531 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4533 struct target_ops
*t
;
4535 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4536 if (t
->to_call_history_from
!= NULL
)
4538 t
->to_call_history_from (begin
, size
, flags
);
4548 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4550 struct target_ops
*t
;
4552 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4553 if (t
->to_call_history_range
!= NULL
)
4555 t
->to_call_history_range (begin
, end
, flags
);
4563 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4565 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4567 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4572 const struct frame_unwind
*
4573 target_get_unwinder (void)
4575 struct target_ops
*t
;
4577 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4578 if (t
->to_get_unwinder
!= NULL
)
4579 return t
->to_get_unwinder
;
4586 const struct frame_unwind
*
4587 target_get_tailcall_unwinder (void)
4589 struct target_ops
*t
;
4591 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4592 if (t
->to_get_tailcall_unwinder
!= NULL
)
4593 return t
->to_get_tailcall_unwinder
;
4601 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4602 struct gdbarch
*gdbarch
)
4604 for (; ops
!= NULL
; ops
= ops
->beneath
)
4605 if (ops
->to_decr_pc_after_break
!= NULL
)
4606 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4608 return gdbarch_decr_pc_after_break (gdbarch
);
4614 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4616 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4620 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4621 int write
, struct mem_attrib
*attrib
,
4622 struct target_ops
*target
)
4626 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4629 fprintf_unfiltered (gdb_stdlog
,
4630 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4631 paddress (target_gdbarch (), memaddr
), len
,
4632 write
? "write" : "read", retval
);
4638 fputs_unfiltered (", bytes =", gdb_stdlog
);
4639 for (i
= 0; i
< retval
; i
++)
4641 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4643 if (targetdebug
< 2 && i
> 0)
4645 fprintf_unfiltered (gdb_stdlog
, " ...");
4648 fprintf_unfiltered (gdb_stdlog
, "\n");
4651 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4655 fputc_unfiltered ('\n', gdb_stdlog
);
4661 debug_to_files_info (struct target_ops
*target
)
4663 debug_target
.to_files_info (target
);
4665 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4669 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4670 struct bp_target_info
*bp_tgt
)
4674 retval
= forward_target_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4676 fprintf_unfiltered (gdb_stdlog
,
4677 "target_insert_breakpoint (%s, xxx) = %ld\n",
4678 core_addr_to_string (bp_tgt
->placed_address
),
4679 (unsigned long) retval
);
4684 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4685 struct bp_target_info
*bp_tgt
)
4689 retval
= forward_target_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4691 fprintf_unfiltered (gdb_stdlog
,
4692 "target_remove_breakpoint (%s, xxx) = %ld\n",
4693 core_addr_to_string (bp_tgt
->placed_address
),
4694 (unsigned long) retval
);
4699 debug_to_can_use_hw_breakpoint (int type
, int cnt
, int from_tty
)
4703 retval
= debug_target
.to_can_use_hw_breakpoint (type
, cnt
, from_tty
);
4705 fprintf_unfiltered (gdb_stdlog
,
4706 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4707 (unsigned long) type
,
4708 (unsigned long) cnt
,
4709 (unsigned long) from_tty
,
4710 (unsigned long) retval
);
4715 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
4719 retval
= debug_target
.to_region_ok_for_hw_watchpoint (addr
, len
);
4721 fprintf_unfiltered (gdb_stdlog
,
4722 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4723 core_addr_to_string (addr
), (unsigned long) len
,
4724 core_addr_to_string (retval
));
4729 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr
, int len
, int rw
,
4730 struct expression
*cond
)
4734 retval
= debug_target
.to_can_accel_watchpoint_condition (addr
, len
,
4737 fprintf_unfiltered (gdb_stdlog
,
4738 "target_can_accel_watchpoint_condition "
4739 "(%s, %d, %d, %s) = %ld\n",
4740 core_addr_to_string (addr
), len
, rw
,
4741 host_address_to_string (cond
), (unsigned long) retval
);
4746 debug_to_stopped_by_watchpoint (void)
4750 retval
= debug_target
.to_stopped_by_watchpoint ();
4752 fprintf_unfiltered (gdb_stdlog
,
4753 "target_stopped_by_watchpoint () = %ld\n",
4754 (unsigned long) retval
);
4759 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4763 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4765 fprintf_unfiltered (gdb_stdlog
,
4766 "target_stopped_data_address ([%s]) = %ld\n",
4767 core_addr_to_string (*addr
),
4768 (unsigned long)retval
);
4773 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4775 CORE_ADDR start
, int length
)
4779 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4782 fprintf_filtered (gdb_stdlog
,
4783 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4784 core_addr_to_string (addr
), core_addr_to_string (start
),
4790 debug_to_insert_hw_breakpoint (struct gdbarch
*gdbarch
,
4791 struct bp_target_info
*bp_tgt
)
4795 retval
= debug_target
.to_insert_hw_breakpoint (gdbarch
, bp_tgt
);
4797 fprintf_unfiltered (gdb_stdlog
,
4798 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4799 core_addr_to_string (bp_tgt
->placed_address
),
4800 (unsigned long) retval
);
4805 debug_to_remove_hw_breakpoint (struct gdbarch
*gdbarch
,
4806 struct bp_target_info
*bp_tgt
)
4810 retval
= debug_target
.to_remove_hw_breakpoint (gdbarch
, bp_tgt
);
4812 fprintf_unfiltered (gdb_stdlog
,
4813 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4814 core_addr_to_string (bp_tgt
->placed_address
),
4815 (unsigned long) retval
);
4820 debug_to_insert_watchpoint (CORE_ADDR addr
, int len
, int type
,
4821 struct expression
*cond
)
4825 retval
= debug_target
.to_insert_watchpoint (addr
, len
, type
, cond
);
4827 fprintf_unfiltered (gdb_stdlog
,
4828 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4829 core_addr_to_string (addr
), len
, type
,
4830 host_address_to_string (cond
), (unsigned long) retval
);
4835 debug_to_remove_watchpoint (CORE_ADDR addr
, int len
, int type
,
4836 struct expression
*cond
)
4840 retval
= debug_target
.to_remove_watchpoint (addr
, len
, type
, cond
);
4842 fprintf_unfiltered (gdb_stdlog
,
4843 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4844 core_addr_to_string (addr
), len
, type
,
4845 host_address_to_string (cond
), (unsigned long) retval
);
4850 debug_to_terminal_init (void)
4852 debug_target
.to_terminal_init ();
4854 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4858 debug_to_terminal_inferior (void)
4860 debug_target
.to_terminal_inferior ();
4862 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4866 debug_to_terminal_ours_for_output (void)
4868 debug_target
.to_terminal_ours_for_output ();
4870 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4874 debug_to_terminal_ours (void)
4876 debug_target
.to_terminal_ours ();
4878 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4882 debug_to_terminal_save_ours (void)
4884 debug_target
.to_terminal_save_ours ();
4886 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4890 debug_to_terminal_info (const char *arg
, int from_tty
)
4892 debug_target
.to_terminal_info (arg
, from_tty
);
4894 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4899 debug_to_load (char *args
, int from_tty
)
4901 debug_target
.to_load (args
, from_tty
);
4903 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4907 debug_to_post_startup_inferior (ptid_t ptid
)
4909 debug_target
.to_post_startup_inferior (ptid
);
4911 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4912 ptid_get_pid (ptid
));
4916 debug_to_insert_fork_catchpoint (int pid
)
4920 retval
= debug_target
.to_insert_fork_catchpoint (pid
);
4922 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4929 debug_to_remove_fork_catchpoint (int pid
)
4933 retval
= debug_target
.to_remove_fork_catchpoint (pid
);
4935 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4942 debug_to_insert_vfork_catchpoint (int pid
)
4946 retval
= debug_target
.to_insert_vfork_catchpoint (pid
);
4948 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4955 debug_to_remove_vfork_catchpoint (int pid
)
4959 retval
= debug_target
.to_remove_vfork_catchpoint (pid
);
4961 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4968 debug_to_insert_exec_catchpoint (int pid
)
4972 retval
= debug_target
.to_insert_exec_catchpoint (pid
);
4974 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4981 debug_to_remove_exec_catchpoint (int pid
)
4985 retval
= debug_target
.to_remove_exec_catchpoint (pid
);
4987 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4994 debug_to_has_exited (int pid
, int wait_status
, int *exit_status
)
4998 has_exited
= debug_target
.to_has_exited (pid
, wait_status
, exit_status
);
5000 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
5001 pid
, wait_status
, *exit_status
, has_exited
);
5007 debug_to_can_run (void)
5011 retval
= debug_target
.to_can_run ();
5013 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
5018 static struct gdbarch
*
5019 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
5021 struct gdbarch
*retval
;
5023 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
5025 fprintf_unfiltered (gdb_stdlog
,
5026 "target_thread_architecture (%s) = %s [%s]\n",
5027 target_pid_to_str (ptid
),
5028 host_address_to_string (retval
),
5029 gdbarch_bfd_arch_info (retval
)->printable_name
);
5034 debug_to_stop (ptid_t ptid
)
5036 debug_target
.to_stop (ptid
);
5038 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
5039 target_pid_to_str (ptid
));
5043 debug_to_rcmd (char *command
,
5044 struct ui_file
*outbuf
)
5046 debug_target
.to_rcmd (command
, outbuf
);
5047 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
5051 debug_to_pid_to_exec_file (int pid
)
5055 exec_file
= debug_target
.to_pid_to_exec_file (pid
);
5057 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
5064 setup_target_debug (void)
5066 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
5068 current_target
.to_open
= debug_to_open
;
5069 current_target
.to_post_attach
= debug_to_post_attach
;
5070 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
5071 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
5072 current_target
.to_files_info
= debug_to_files_info
;
5073 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
5074 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
5075 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
5076 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
5077 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
5078 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
5079 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
5080 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
5081 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
5082 current_target
.to_watchpoint_addr_within_range
5083 = debug_to_watchpoint_addr_within_range
;
5084 current_target
.to_region_ok_for_hw_watchpoint
5085 = debug_to_region_ok_for_hw_watchpoint
;
5086 current_target
.to_can_accel_watchpoint_condition
5087 = debug_to_can_accel_watchpoint_condition
;
5088 current_target
.to_terminal_init
= debug_to_terminal_init
;
5089 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
5090 current_target
.to_terminal_ours_for_output
5091 = debug_to_terminal_ours_for_output
;
5092 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
5093 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
5094 current_target
.to_terminal_info
= debug_to_terminal_info
;
5095 current_target
.to_load
= debug_to_load
;
5096 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
5097 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
5098 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
5099 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
5100 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
5101 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
5102 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
5103 current_target
.to_has_exited
= debug_to_has_exited
;
5104 current_target
.to_can_run
= debug_to_can_run
;
5105 current_target
.to_stop
= debug_to_stop
;
5106 current_target
.to_rcmd
= debug_to_rcmd
;
5107 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
5108 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
5112 static char targ_desc
[] =
5113 "Names of targets and files being debugged.\nShows the entire \
5114 stack of targets currently in use (including the exec-file,\n\
5115 core-file, and process, if any), as well as the symbol file name.";
5118 do_monitor_command (char *cmd
,
5121 if ((current_target
.to_rcmd
5122 == (void (*) (char *, struct ui_file
*)) tcomplain
)
5123 || (current_target
.to_rcmd
== debug_to_rcmd
5124 && (debug_target
.to_rcmd
5125 == (void (*) (char *, struct ui_file
*)) tcomplain
)))
5126 error (_("\"monitor\" command not supported by this target."));
5127 target_rcmd (cmd
, gdb_stdtarg
);
5130 /* Print the name of each layers of our target stack. */
5133 maintenance_print_target_stack (char *cmd
, int from_tty
)
5135 struct target_ops
*t
;
5137 printf_filtered (_("The current target stack is:\n"));
5139 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
5141 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
5145 /* Controls if async mode is permitted. */
5146 int target_async_permitted
= 0;
5148 /* The set command writes to this variable. If the inferior is
5149 executing, target_async_permitted is *not* updated. */
5150 static int target_async_permitted_1
= 0;
5153 set_target_async_command (char *args
, int from_tty
,
5154 struct cmd_list_element
*c
)
5156 if (have_live_inferiors ())
5158 target_async_permitted_1
= target_async_permitted
;
5159 error (_("Cannot change this setting while the inferior is running."));
5162 target_async_permitted
= target_async_permitted_1
;
5166 show_target_async_command (struct ui_file
*file
, int from_tty
,
5167 struct cmd_list_element
*c
,
5170 fprintf_filtered (file
,
5171 _("Controlling the inferior in "
5172 "asynchronous mode is %s.\n"), value
);
5175 /* Temporary copies of permission settings. */
5177 static int may_write_registers_1
= 1;
5178 static int may_write_memory_1
= 1;
5179 static int may_insert_breakpoints_1
= 1;
5180 static int may_insert_tracepoints_1
= 1;
5181 static int may_insert_fast_tracepoints_1
= 1;
5182 static int may_stop_1
= 1;
5184 /* Make the user-set values match the real values again. */
5187 update_target_permissions (void)
5189 may_write_registers_1
= may_write_registers
;
5190 may_write_memory_1
= may_write_memory
;
5191 may_insert_breakpoints_1
= may_insert_breakpoints
;
5192 may_insert_tracepoints_1
= may_insert_tracepoints
;
5193 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
5194 may_stop_1
= may_stop
;
5197 /* The one function handles (most of) the permission flags in the same
5201 set_target_permissions (char *args
, int from_tty
,
5202 struct cmd_list_element
*c
)
5204 if (target_has_execution
)
5206 update_target_permissions ();
5207 error (_("Cannot change this setting while the inferior is running."));
5210 /* Make the real values match the user-changed values. */
5211 may_write_registers
= may_write_registers_1
;
5212 may_insert_breakpoints
= may_insert_breakpoints_1
;
5213 may_insert_tracepoints
= may_insert_tracepoints_1
;
5214 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
5215 may_stop
= may_stop_1
;
5216 update_observer_mode ();
5219 /* Set memory write permission independently of observer mode. */
5222 set_write_memory_permission (char *args
, int from_tty
,
5223 struct cmd_list_element
*c
)
5225 /* Make the real values match the user-changed values. */
5226 may_write_memory
= may_write_memory_1
;
5227 update_observer_mode ();
5232 initialize_targets (void)
5234 init_dummy_target ();
5235 push_target (&dummy_target
);
5237 add_info ("target", target_info
, targ_desc
);
5238 add_info ("files", target_info
, targ_desc
);
5240 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5241 Set target debugging."), _("\
5242 Show target debugging."), _("\
5243 When non-zero, target debugging is enabled. Higher numbers are more\n\
5244 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5248 &setdebuglist
, &showdebuglist
);
5250 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5251 &trust_readonly
, _("\
5252 Set mode for reading from readonly sections."), _("\
5253 Show mode for reading from readonly sections."), _("\
5254 When this mode is on, memory reads from readonly sections (such as .text)\n\
5255 will be read from the object file instead of from the target. This will\n\
5256 result in significant performance improvement for remote targets."),
5258 show_trust_readonly
,
5259 &setlist
, &showlist
);
5261 add_com ("monitor", class_obscure
, do_monitor_command
,
5262 _("Send a command to the remote monitor (remote targets only)."));
5264 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5265 _("Print the name of each layer of the internal target stack."),
5266 &maintenanceprintlist
);
5268 add_setshow_boolean_cmd ("target-async", no_class
,
5269 &target_async_permitted_1
, _("\
5270 Set whether gdb controls the inferior in asynchronous mode."), _("\
5271 Show whether gdb controls the inferior in asynchronous mode."), _("\
5272 Tells gdb whether to control the inferior in asynchronous mode."),
5273 set_target_async_command
,
5274 show_target_async_command
,
5278 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5279 &may_write_registers_1
, _("\
5280 Set permission to write into registers."), _("\
5281 Show permission to write into registers."), _("\
5282 When this permission is on, GDB may write into the target's registers.\n\
5283 Otherwise, any sort of write attempt will result in an error."),
5284 set_target_permissions
, NULL
,
5285 &setlist
, &showlist
);
5287 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5288 &may_write_memory_1
, _("\
5289 Set permission to write into target memory."), _("\
5290 Show permission to write into target memory."), _("\
5291 When this permission is on, GDB may write into the target's memory.\n\
5292 Otherwise, any sort of write attempt will result in an error."),
5293 set_write_memory_permission
, NULL
,
5294 &setlist
, &showlist
);
5296 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5297 &may_insert_breakpoints_1
, _("\
5298 Set permission to insert breakpoints in the target."), _("\
5299 Show permission to insert breakpoints in the target."), _("\
5300 When this permission is on, GDB may insert breakpoints in the program.\n\
5301 Otherwise, any sort of insertion attempt will result in an error."),
5302 set_target_permissions
, NULL
,
5303 &setlist
, &showlist
);
5305 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5306 &may_insert_tracepoints_1
, _("\
5307 Set permission to insert tracepoints in the target."), _("\
5308 Show permission to insert tracepoints in the target."), _("\
5309 When this permission is on, GDB may insert tracepoints in the program.\n\
5310 Otherwise, any sort of insertion attempt will result in an error."),
5311 set_target_permissions
, NULL
,
5312 &setlist
, &showlist
);
5314 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5315 &may_insert_fast_tracepoints_1
, _("\
5316 Set permission to insert fast tracepoints in the target."), _("\
5317 Show permission to insert fast tracepoints in the target."), _("\
5318 When this permission is on, GDB may insert fast tracepoints.\n\
5319 Otherwise, any sort of insertion attempt will result in an error."),
5320 set_target_permissions
, NULL
,
5321 &setlist
, &showlist
);
5323 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5325 Set permission to interrupt or signal the target."), _("\
5326 Show permission to interrupt or signal the target."), _("\
5327 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5328 Otherwise, any attempt to interrupt or stop will be ignored."),
5329 set_target_permissions
, NULL
,
5330 &setlist
, &showlist
);