/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2012 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdb_string.h"
#include "gdb_assert.h"
#include "exceptions.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdb/fileio.h"
static void target_info (char *, int);

static void default_terminal_info (char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
                                                 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);

static int return_zero (void);

static int return_one (void);

static int return_minus_one (void);

void target_ignore (void);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static LONGEST default_xfer_partial (struct target_ops *ops,
                                     enum target_object object,
                                     const char *annex, gdb_byte *readbuf,
                                     const gdb_byte *writebuf,
                                     ULONGEST offset, LONGEST len);

static LONGEST current_xfer_partial (struct target_ops *ops,
                                     enum target_object object,
                                     const char *annex, gdb_byte *readbuf,
                                     const gdb_byte *writebuf,
                                     ULONGEST offset, LONGEST len);

static LONGEST target_xfer_partial (struct target_ops *ops,
                                    enum target_object object,
                                    const char *annex,
                                    void *readbuf, const void *writebuf,
                                    ULONGEST offset, LONGEST len);

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
                                                    ptid_t ptid);

static void init_dummy_target (void);

static struct target_ops debug_target;

static void debug_to_open (char *, int);

static void debug_to_prepare_to_store (struct regcache *);

static void debug_to_files_info (struct target_ops *);

static int debug_to_insert_breakpoint (struct gdbarch *,
                                       struct bp_target_info *);

static int debug_to_remove_breakpoint (struct gdbarch *,
                                       struct bp_target_info *);

static int debug_to_can_use_hw_breakpoint (int, int, int);

static int debug_to_insert_hw_breakpoint (struct gdbarch *,
                                          struct bp_target_info *);

static int debug_to_remove_hw_breakpoint (struct gdbarch *,
                                          struct bp_target_info *);

static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
                                       struct expression *);

static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
                                       struct expression *);

static int debug_to_stopped_by_watchpoint (void);

static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);

static int debug_to_watchpoint_addr_within_range (struct target_ops *,
                                                  CORE_ADDR, CORE_ADDR, int);

static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);

static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
                                                    struct expression *);

static void debug_to_terminal_init (void);

static void debug_to_terminal_inferior (void);

static void debug_to_terminal_ours_for_output (void);

static void debug_to_terminal_save_ours (void);

static void debug_to_terminal_ours (void);

static void debug_to_terminal_info (char *, int);

static void debug_to_load (char *, int);

static int debug_to_can_run (void);

static void debug_to_stop (ptid_t);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_index;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;
/* Non-zero if we want to see trace of target level stuff.  */

static int targetdebug = 0;

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}

static void setup_target_debug (void);

/* The option sets this.  */
static int stack_cache_enabled_p_1 = 1;
/* And set_stack_cache_enabled_p updates this.
   The reason for the separation is so that we don't flush the cache for
   on->on transitions.  */
static int stack_cache_enabled_p = 1;

/* This is called *after* the stack-cache has been set.
   Flush the cache for off->on and on->off transitions.
   There's no real need to flush the cache for on->off transitions,
   except cleanliness.  */

static void
set_stack_cache_enabled_p (char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (stack_cache_enabled_p != stack_cache_enabled_p_1)
    target_dcache_invalidate ();

  stack_cache_enabled_p = stack_cache_enabled_p_1;
}

static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}

/* Cache of memory operations, to speed up remote access.  */
static DCACHE *target_dcache;

/* Invalidate the target dcache.  */

void
target_dcache_invalidate (void)
{
  dcache_invalidate (target_dcache);
}

/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
                  gdb_stdout);
}
/* Default target_has_* methods for process_stratum targets.  */

int
default_child_has_all_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_stack (struct target_ops *ops)
{
  /* If no inferior selected, there's no stack.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_registers (struct target_ops *ops)
{
  /* Can't read registers from no inferior.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
{
  /* If there's no thread selected, then we can't make it run through
     hoops.  */
  if (ptid_equal (the_ptid, null_ptid))
    return 0;

  return 1;
}


int
target_has_all_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_all_memory (t))
      return 1;

  return 0;
}

int
target_has_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_memory (t))
      return 1;

  return 0;
}

int
target_has_stack_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_stack (t))
      return 1;

  return 0;
}

int
target_has_registers_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_registers (t))
      return 1;

  return 0;
}

int
target_has_execution_1 (ptid_t the_ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_execution (t, the_ptid))
      return 1;

  return 0;
}

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
/* Add a possible target architecture to the list.  */

void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
        (target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
        xrealloc ((char *) target_structs,
                  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
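
/* Editorial sketch (not part of GDB): roughly how a backend registers
   itself with add_target from its _initialize routine.  The "sketch"
   target and its functions below are hypothetical; optional hooks are
   left NULL so that add_target installs the defaults set up above.  */
#if 0
static struct target_ops sketch_ops;

static void
sketch_open (char *args, int from_tty)
{
  push_target (&sketch_ops);
}

static void
init_sketch_ops (void)
{
  sketch_ops.to_shortname = "sketch";
  sketch_ops.to_longname = "Illustrative do-nothing target";
  sketch_ops.to_doc = "Example target used only for documentation.";
  sketch_ops.to_open = sketch_open;
  sketch_ops.to_stratum = process_stratum;
  sketch_ops.to_magic = OPS_MAGIC;
  /* to_xfer_partial, to_has_memory, etc. stay NULL; add_target fills
     them in with default_xfer_partial and return_zero.  */
  add_target (&sketch_ops);
}
#endif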
void
target_kill (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_kill != NULL)
      {
        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

        t->to_kill (t);
        return;
      }

  noprocess ();
}

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}

void
target_create_inferior (char *exec_file, char *args,
                        char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
        {
          t->to_create_inferior (t, exec_file, args, env, from_tty);
          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_create_inferior (%s, %s, xxx, %d)\n",
                                exec_file, args, from_tty);
          return;
        }
    }

  internal_error (__FILE__, __LINE__,
                  _("could not find a target to create inferior"));
}

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
          struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_target.to_shortname);
}

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}

static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}

/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}

static enum exec_direction_kind
default_execution_direction (void)
{
  if (!target_can_execute_reverse)
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
        current_target.FIELD = (TARGET)->FIELD
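
  /* Editorial note (not in the original sources): for a single field, the
     call INHERIT (to_files_info, t) simply expands to

         if (!current_target.to_files_info)
           current_target.to_files_info = (t)->to_files_info;

     so the first (topmost) target on the stack that provides a method wins,
     and lower strata cannot override it afterwards.  */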
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field) \
      current_target.field = value
  de_fault (to_open, (void (*) (char *, int)) tcomplain);
  de_fault (to_post_attach, (void (*) (int)) target_ignore);
  de_fault (to_prepare_to_store, (void (*) (struct regcache *)) noprocess);
  de_fault (deprecated_xfer_memory,
            (int (*) (CORE_ADDR, gdb_byte *, int, int,
                      struct mem_attrib *, struct target_ops *))
            nomemory);
  de_fault (to_files_info, (void (*) (struct target_ops *)) target_ignore);
  de_fault (to_insert_breakpoint, memory_insert_breakpoint);
  de_fault (to_remove_breakpoint, memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint, (int (*) (int, int, int)) return_zero);
  de_fault (to_insert_hw_breakpoint,
            (int (*) (struct gdbarch *, struct bp_target_info *))
            return_minus_one);
  de_fault (to_remove_hw_breakpoint,
            (int (*) (struct gdbarch *, struct bp_target_info *))
            return_minus_one);
  de_fault (to_insert_watchpoint,
            (int (*) (CORE_ADDR, int, int, struct expression *))
            return_minus_one);
  de_fault (to_remove_watchpoint,
            (int (*) (CORE_ADDR, int, int, struct expression *))
            return_minus_one);
  de_fault (to_stopped_by_watchpoint, (int (*) (void)) return_zero);
  de_fault (to_stopped_data_address,
            (int (*) (struct target_ops *, CORE_ADDR *)) return_zero);
  de_fault (to_watchpoint_addr_within_range,
            default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
            default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
            (int (*) (CORE_ADDR, int, int, struct expression *))
            return_zero);
  de_fault (to_terminal_init, (void (*) (void)) target_ignore);
  de_fault (to_terminal_inferior, (void (*) (void)) target_ignore);
  de_fault (to_terminal_ours_for_output, (void (*) (void)) target_ignore);
  de_fault (to_terminal_ours, (void (*) (void)) target_ignore);
  de_fault (to_terminal_save_ours, (void (*) (void)) target_ignore);
  de_fault (to_terminal_info, default_terminal_info);
  de_fault (to_load, (void (*) (char *, int)) tcomplain);
  de_fault (to_post_startup_inferior, (void (*) (ptid_t)) target_ignore);
  de_fault (to_insert_fork_catchpoint, (int (*) (int)) return_one);
  de_fault (to_remove_fork_catchpoint, (int (*) (int)) return_one);
  de_fault (to_insert_vfork_catchpoint, (int (*) (int)) return_one);
  de_fault (to_remove_vfork_catchpoint, (int (*) (int)) return_one);
  de_fault (to_insert_exec_catchpoint, (int (*) (int)) return_one);
  de_fault (to_remove_exec_catchpoint, (int (*) (int)) return_one);
  de_fault (to_set_syscall_catchpoint,
            (int (*) (int, int, int, int, int *)) nosupport_runtime);
  de_fault (to_has_exited, (int (*) (int, int, int *)) return_zero);
  de_fault (to_can_run, return_zero);
  de_fault (to_extra_thread_info,
            (char *(*) (struct thread_info *)) return_zero);
  de_fault (to_thread_name, (char *(*) (struct thread_info *)) return_zero);
  de_fault (to_stop, (void (*) (ptid_t)) target_ignore);
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd, (void (*) (char *, struct ui_file *)) tcomplain);
  de_fault (to_pid_to_exec_file, (char *(*) (int)) return_zero);
  de_fault (to_async,
            (void (*) (void (*) (enum inferior_event_type, void*), void*))
            tcomplain);
  de_fault (to_thread_architecture, default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
            (ptid_t (*) (long, long)) default_get_ada_task_ptid);
  de_fault (to_supports_multi_process, (int (*) (void)) return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
            (int (*) (void)) return_zero);
  de_fault (to_supports_string_tracing, (int (*) (void)) return_zero);
  de_fault (to_trace_init, (void (*) (void)) tcomplain);
  de_fault (to_download_tracepoint,
            (void (*) (struct bp_location *)) tcomplain);
  de_fault (to_can_download_tracepoint, (int (*) (void)) return_zero);
  de_fault (to_download_trace_state_variable,
            (void (*) (struct trace_state_variable *)) tcomplain);
  de_fault (to_enable_tracepoint,
            (void (*) (struct bp_location *)) tcomplain);
  de_fault (to_disable_tracepoint,
            (void (*) (struct bp_location *)) tcomplain);
  de_fault (to_trace_set_readonly_regions, (void (*) (void)) tcomplain);
  de_fault (to_trace_start, (void (*) (void)) tcomplain);
  de_fault (to_get_trace_status,
            (int (*) (struct trace_status *)) return_minus_one);
  de_fault (to_get_tracepoint_status,
            (void (*) (struct breakpoint *, struct uploaded_tp *)) tcomplain);
  de_fault (to_trace_stop, (void (*) (void)) tcomplain);
  de_fault (to_trace_find,
            (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
            return_minus_one);
  de_fault (to_get_trace_state_variable_value,
            (int (*) (int, LONGEST *)) return_zero);
  de_fault (to_save_trace_data, (int (*) (const char *)) tcomplain);
  de_fault (to_upload_tracepoints,
            (int (*) (struct uploaded_tp **)) return_zero);
  de_fault (to_upload_trace_state_variables,
            (int (*) (struct uploaded_tsv **)) return_zero);
  de_fault (to_get_raw_trace_data,
            (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST)) tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
            (int (*) (void)) return_minus_one);
  de_fault (to_set_disconnected_tracing, (void (*) (int)) target_ignore);
  de_fault (to_set_circular_trace_buffer, (void (*) (int)) target_ignore);
  de_fault (to_set_trace_notes,
            (int (*) (char *, char *, char *)) return_zero);
  de_fault (to_get_tib_address, (int (*) (ptid_t, CORE_ADDR *)) tcomplain);
  de_fault (to_set_permissions, (void (*) (void)) target_ignore);
  de_fault (to_static_tracepoint_marker_at,
            (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
            return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
            (VEC(static_tracepoint_marker_p) * (*) (const char *))
            tcomplain);
  de_fault (to_traceframe_info,
            (struct traceframe_info * (*) (void)) tcomplain);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
            (int (*) (void)) return_zero);
  de_fault (to_can_run_breakpoint_commands, (int (*) (void)) return_zero);
  de_fault (to_use_agent, (int (*) (int)) tcomplain);
  de_fault (to_can_use_agent, (int (*) (void)) return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
        break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}

/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
                    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
        break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t, 0);

  return 1;
}

void
pop_target (void)
{
  target_close (target_stack, 0);	/* Let it clean up.  */
  if (unpush_target (target_stack) == 1)
    return;

  fprintf_unfiltered (gdb_stderr,
                      "pop_target couldn't find target %s\n",
                      current_target.to_shortname);
  internal_error (__FILE__, __LINE__,
                  _("failed internal consistency check"));
}
void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      target_close (target_stack, quitting);
      if (!unpush_target (target_stack))
        {
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}

void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}

/* Return 1 if T is now pushed in the target stack.  Return 0 otherwise.  */

int
target_is_pushed (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    if (*cur == t)
      return 1;

  return 0;
}
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */

CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
        break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
                                                           objfile);
          /* If it's 0, throw the appropriate exception.  */
          if (lm_addr == 0)
            throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
                         _("TLS load module not found"));

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile->name);
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile->name);
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile->name, target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile->name, target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile->name, ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile->name, ex.message);
              break;
            default:
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, origlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  origlen = len;

  while (len > 0)
    {
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
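
/* Editorial sketch (not part of GDB): a typical call to target_read_string,
   reading at most 200 bytes of a C string from a hypothetical address ADDR.
   The buffer is malloc'd even on error and belongs to the caller.  */
#if 0
  char *str;
  int errcode;
  int nbytes = target_read_string (addr, &str, 200, &errcode);

  if (errcode != 0)
    printf_filtered (_("string read stopped after %d bytes (errno %d)\n"),
                     nbytes, errcode);
  xfree (str);
#endif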
struct target_section_table *
target_get_section_table (struct target_ops *target)
{
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");

  for (t = target; t != NULL; t = t->beneath)
    if (t->to_get_section_table != NULL)
      return (*t->to_get_section_table) (t);

  return NULL;
}

/* Find a section containing ADDR.  */

struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
  struct target_section_table *table = target_get_section_table (target);
  struct target_section *secp;

  if (table == NULL)
    return NULL;

  for (secp = table->sections; secp < table->sections_end; secp++)
    {
      if (addr >= secp->addr && addr < secp->endaddr)
        return secp;
    }
  return NULL;
}

/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static LONGEST
target_read_live_memory (enum target_object object,
                         ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
{
  LONGEST ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  ret = target_read (current_target.beneath, object, NULL,
                     myaddr, memaddr, len);

  do_cleanups (cleanup);
  return ret;
}
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
                                   enum target_object object,
                                   gdb_byte *readbuf, ULONGEST memaddr,
                                   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
          & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
        {
          if (memaddr >= p->addr)
            {
              if (memend <= p->endaddr)
                {
                  /* Entire transfer is within this section.  */
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len);
                }
              else if (memaddr >= p->endaddr)
                {
                  /* This section ends before the transfer starts.  */
                  continue;
                }
              else
                {
                  /* This section overlaps the transfer.  Just do half.  */
                  len = p->endaddr - memaddr;
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len);
                }
            }
        }
    }

  return 0;
}
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static LONGEST
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       void *readbuf, const void *writebuf, ULONGEST memaddr,
                       LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          struct target_section_table *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len,
                                                    table->sections,
                                                    table->sections_end,
                                                    section_name);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
              & SEC_READONLY))
        {
          table = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len,
                                                    table->sections,
                                                    table->sections_end,
                                                    NULL);
        }
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
         target does not support querying traceframe info, and so we
         attempt reading from the traceframe anyway (assuming the
         target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
        {
          struct cleanup *old_chain;

          old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

          if (VEC_empty (mem_range_s, available)
              || VEC_index (mem_range_s, available, 0)->start != memaddr)
            {
              /* Don't read into the traceframe's available
                 memory.  */
              if (!VEC_empty (mem_range_s, available))
                {
                  LONGEST oldlen = len;

                  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
                  gdb_assert (len <= oldlen);
                }

              do_cleanups (old_chain);

              /* This goes through the topmost target again.  */
              res = memory_xfer_live_readonly_partial (ops, object,
                                                       readbuf, memaddr, len);
              if (res > 0)
                return res;

              /* No use trying further, we know some memory starting
                 at MEMADDR isn't available.  */
              return -1;
            }

          /* Don't try to read more than how much is available, in
             case the target implements the deprecated QTro packet to
             cater for older GDBs (the target's knowledge of read-only
             sections may be outdated by now).  */
          len = VEC_index (mem_range_s, available, 0)->length;

          do_cleanups (old_chain);
        }
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
        res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
                                  reg_len, 0);
      else
        /* FIXME drow/2006-08-09: If we're going to preserve const
           correctness dcache_xfer_memory should take readbuf and
           writebuf.  */
        res = dcache_xfer_memory (ops, target_dcache, memaddr,
                                  (void *) writebuf,
                                  reg_len, 1);
      if (res <= 0)
        return -1;
      else
        return res;
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
        break;

      /* We want to continue past core files to executables, but not
         past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
        break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     void *readbuf, const void *writebuf, ULONGEST memaddr,
                     LONGEST len)
{
  LONGEST res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return 0;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);

      if (res > 0 && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
    }
  else
    {
      void *buf;
      struct cleanup *old_chain;

      buf = xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);

      do_cleanups (old_chain);
    }

  return res;
}

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
                       (void *) (uintptr_t) current);
}
/* For docs see target.h, to_xfer_partial.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     void *readbuf, const void *writebuf,
                     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
         memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
        raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
                                     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
                          "%s:target_xfer_partial "
                          "(%d, %s, %s, %s, %s, %s) = %s",
                          ops->to_shortname,
                          (int) object,
                          (annex ? annex : "(null)"),
                          host_address_to_string (readbuf),
                          host_address_to_string (writebuf),
                          core_addr_to_string_nz (offset),
                          plongest (len), plongest (retval));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
        {
          int i;

          fputs_unfiltered (", bytes =", gdb_stdlog);
          for (i = 0; i < retval; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      fprintf_unfiltered (gdb_stdlog, " ...");
                      break;
                    }
                  fprintf_unfiltered (gdb_stdlog, "\n");
                }

              fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
            }
        }

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  return retval;
}
/* Read LEN bytes of target memory at address MEMADDR, placing the results in
   GDB's memory at MYADDR.  Returns either 0 for success or an errno value
   if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return EIO;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return EIO;
}

/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or an errno value if any error occurs.
   If an error occurs, no guarantee is made about how much data got written.
   Callers that can deal with partial writes should call target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return EIO;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or an errno value if any
   error occurs.  If an error occurs, no guarantee is made about how
   much data got written.  Callers that can deal with partial writes
   should call target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
                         ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return EIO;
}
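
/* Editorial sketch (not part of GDB): reading one word with
   target_read_memory and checking its errno-style result.  ADDR is a
   hypothetical address.  Unlike target_read, a partial read is reported
   as a plain failure here.  */
#if 0
  gdb_byte word[4];

  if (target_read_memory (addr, word, sizeof word) != 0)
    error (_("cannot read memory at %s"), paddress (target_gdbarch, addr));
#endif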
/* Fetch the target's memory map.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
         VEC_length (mem_region_s, result),
         sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          VEC_free (mem_region_s, result);
          return NULL;
        }
      last_one = this_one;
    }

  return result;
}

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_flash_erase != NULL)
      {
        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
                              hex_string (address), phex (length, 0));
        t->to_flash_erase (t, address, length);
        return;
      }

  tcomplain ();
}

void
target_flash_done (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_flash_done != NULL)
      {
        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
        t->to_flash_done (t);
        return;
      }

  tcomplain ();
}

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
/* More generic transfers.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
                      const char *annex, gdb_byte *readbuf,
                      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
        {
          void *buffer = xmalloc (len);
          struct cleanup *cleanup = make_cleanup (xfree, buffer);

          memcpy (buffer, writebuf, len);
          xfered = ops->deprecated_xfer_memory (offset, buffer, len,
                                                1/*write*/, NULL, ops);
          do_cleanups (cleanup);
        }
      if (readbuf != NULL)
        xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
                                              0/*read*/, NULL, ops);
      if (xfered > 0)
        return xfered;
      else if (xfered == 0 && errno == 0)
        /* "deprecated_xfer_memory" uses 0, cross checked against
           ERRNO as one indication of an error.  */
        return 0;
      else
        return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
                                          readbuf, writebuf, offset, len);
  else
    return -1;
}

/* The xfer_partial handler for the topmost target.  Unlike the default,
   it does not need to handle memory specially; it just passes all
   requests down the stack.  */

static LONGEST
current_xfer_partial (struct target_ops *ops, enum target_object object,
                      const char *annex, gdb_byte *readbuf,
                      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
                                          readbuf, writebuf, offset, len);
  else
    return -1;
}
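
/* Editorial sketch (not part of GDB): the usual shape of a backend's
   to_xfer_partial method -- serve the objects it understands and delegate
   everything else to the target beneath, which is what current_xfer_partial
   above does unconditionally.  The sketch_* names are hypothetical.  */
#if 0
static LONGEST
sketch_xfer_partial (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte *readbuf,
                     const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY && readbuf != NULL)
    {
      /* Fill READBUF with up to LEN bytes starting at OFFSET and return
         the number of bytes actually transferred, or -1 on error.  */
      return sketch_read_memory (readbuf, offset, len);
    }

  if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
                                          readbuf, writebuf, offset, len);
  return -1;
}
#endif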
/* Target vector read/write partial wrapper functions.  */

static LONGEST
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}

static LONGEST
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}

/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
             enum target_object object,
             const char *annex, gdb_byte *buf,
             ULONGEST offset, LONGEST len)
{
  LONGEST xfered = 0;

  while (xfered < len)
    {
      LONGEST xfer = target_read_partial (ops, object, annex,
                                          (gdb_byte *) buf + xfered,
                                          offset + xfered, len - xfered);

      /* Call an observer, notifying them of the xfer progress?  */
      if (xfer == 0)
        return xfered;
      if (xfer < 0)
        return -1;
      xfered += xfer;
      QUIT;
    }
  return len;
}
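
/* Editorial sketch (not part of GDB): because target_read loops until no
   progress is made, callers only need to compare the result against the
   requested length.  AUXV_LEN below is a hypothetical buffer length.  */
#if 0
  gdb_byte *auxv = xmalloc (auxv_len);

  if (target_read (&current_target, TARGET_OBJECT_AUXV, NULL,
                   auxv, 0, auxv_len) != auxv_len)
    error (_("auxv transfer failed or was truncated"));
  xfree (auxv);
#endif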
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1) == 1)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end - begin) - 1, end - 1, 1) == 1)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
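
/* Editorial worked example (hypothetical addresses, not in the original
   sources): asked for [0x1000, 0x2000) when only [0x1000, 0x1800) is
   readable, the one-byte probe at 0x1000 succeeds, so the bisection walks
   forward and converges on 0x1800; a single block covering
   [0x1000, 0x1800) is pushed onto *RESULT and the unreadable tail is
   simply not reported.  */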
static void
free_memory_read_result_vector (void *x)
{
  VEC(memory_read_result_s) *v = x;
  memory_read_result_s *current;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
    {
      xfree (current->data);
    }
  VEC_free (memory_read_result_s, v);
}

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
        rlen = len - xfered;
      else
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              struct memory_read_result r;

              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
/* An alternative to target_write with progress callbacks.  */

target_write_with_progress (struct target_ops *ops,
                            enum target_object object,
                            const char *annex, const gdb_byte *buf,
                            ULONGEST offset, LONGEST len,
                            void (*progress) (ULONGEST, void *), void *baton)
  /* Give the progress callback a chance to set up.  */
  (*progress) (0, baton);

  while (xfered < len)
      LONGEST xfer = target_write_partial (ops, object, annex,
                                           (gdb_byte *) buf + xfered,
                                           offset + xfered, len - xfered);

      (*progress) (xfer, baton);
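/* Illustrative sketch, not built: a minimal PROGRESS callback for
   target_write_with_progress.  The baton layout and the callback name are
   made up for this example; the first call arrives with 0 bytes (the
   "set up" call above), later calls report each partial write.  A caller
   would pass the function and a pointer to its baton as the last two
   arguments of target_write_with_progress.  */
#if 0
struct write_progress_baton
{
  ULONGEST total;       /* Bytes we expect to write in total.  */
  ULONGEST written;     /* Running count of bytes written so far.  */
};

static void
report_write_progress (ULONGEST just_written, void *baton)
{
  struct write_progress_baton *wp = baton;

  wp->written += just_written;
  printf_unfiltered ("wrote %s of %s bytes\n",
                     pulongest (wp->written), pulongest (wp->total));
}
#endif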
/* For docs on target_write see target.h.  */

target_write (struct target_ops *ops,
              enum target_object object,
              const char *annex, const gdb_byte *buf,
              ULONGEST offset, LONGEST len)
  return target_write_with_progress (ops, object, annex, buf, offset, len,
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
  size_t buf_alloc, buf_pos;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf = xmalloc (buf_alloc);

      n = target_read_partial (ops, object, annex, &buf[buf_pos],
                               buf_pos, buf_alloc - buf_pos - padding);

          /* An error occurred.  */

          /* Read all there was.  */

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
          buf = xrealloc (buf, buf_alloc);
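/* Illustrative sketch, not built: the grow-on-demand buffer loop used by
   target_read_alloc_1, isolated from the target machinery.  The
   `read_some' callback and the 4096-byte starting size are assumptions
   for the example; the doubling rule keeps the number of xrealloc calls
   logarithmic in the final size.  */
#if 0
static LONGEST
read_all_growing (LONGEST (*read_some) (gdb_byte *dest, size_t len),
                  gdb_byte **buf_p)
{
  size_t buf_alloc = 4096;      /* Assumed starting size.  */
  size_t buf_pos = 0;
  gdb_byte *buf = xmalloc (buf_alloc);

  while (1)
    {
      LONGEST n = read_some (&buf[buf_pos], buf_alloc - buf_pos);

      if (n < 0)
        {
          /* An error occurred; discard what was collected.  */
          xfree (buf);
          return -1;
        }
      if (n == 0)
        {
          /* End of data; hand the buffer to the caller.  */
          *buf_p = buf;
          return buf_pos;
        }

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }
    }
}
#endif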
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

target_read_alloc (struct target_ops *ops, enum target_object object,
                   const char *annex, gdb_byte **buf_p)
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
/* Read OBJECT/ANNEX using OPS.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
  LONGEST i, transferred;

  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  buffer[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (buffer); i < transferred; i++)
      warning (_("target object %d, annex %s, "
                 "contained unexpected null characters"),
               (int) object, annex ? annex : "(none)");

  return (char *) buffer;
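/* Illustrative sketch, not built: a typical target_read_stralloc call.
   TARGET_OBJECT_OSDATA is used elsewhere in this file; whether a given
   target supports it, and the "processes" annex, is target-dependent, so
   treat this purely as a calling-convention example.  */
#if 0
static void
show_process_osdata_example (void)
{
  char *text = target_read_stralloc (&current_target, TARGET_OBJECT_OSDATA,
                                     "processes");

  if (text == NULL)
    error (_("Target does not support OS data."));

  printf_unfiltered ("%s", text);
  xfree (text);
}
#endif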
/* Memory transfer methods.  */

get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
    memory_error (EIO, addr);

get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
                            int len, enum bfd_endian byte_order)
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
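/* Illustrative sketch, not built: fetching a 4-byte big-endian word from
   an auxiliary target with the helper above.  The address is a made-up
   example value.  */
#if 0
static ULONGEST
peek_word_example (struct target_ops *aux_target)
{
  return get_target_memory_unsigned (aux_target, 0x1000 /* example */,
                                     4, BFD_ENDIAN_BIG);
}
#endif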
2417 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2418 struct bp_target_info
*bp_tgt
)
2420 if (!may_insert_breakpoints
)
2422 warning (_("May not insert breakpoints"));
2426 return (*current_target
.to_insert_breakpoint
) (gdbarch
, bp_tgt
);
2430 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2431 struct bp_target_info
*bp_tgt
)
2433 /* This is kind of a weird case to handle, but the permission might
2434 have been changed after breakpoints were inserted - in which case
2435 we should just take the user literally and assume that any
2436 breakpoints should be left in place. */
2437 if (!may_insert_breakpoints
)
2439 warning (_("May not remove breakpoints"));
2443 return (*current_target
.to_remove_breakpoint
) (gdbarch
, bp_tgt
);
2447 target_info (char *args
, int from_tty
)
2449 struct target_ops
*t
;
2450 int has_all_mem
= 0;
2452 if (symfile_objfile
!= NULL
)
2453 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile
->name
);
2455 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2457 if (!(*t
->to_has_memory
) (t
))
2460 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2463 printf_unfiltered (_("\tWhile running this, "
2464 "GDB does not access memory from...\n"));
2465 printf_unfiltered ("%s:\n", t
->to_longname
);
2466 (t
->to_files_info
) (t
);
2467 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2471 /* This function is called before any new inferior is created, e.g.
2472 by running a program, attaching, or connecting to a target.
2473 It cleans up any state from previous invocations which might
2474 change between runs. This is a subset of what target_preopen
2475 resets (things which might change between targets). */
2478 target_pre_inferior (int from_tty
)
2480 /* Clear out solib state. Otherwise the solib state of the previous
2481 inferior might have survived and is entirely wrong for the new
2482 target. This has been observed on GNU/Linux using glibc 2.3. How
2494 Cannot access memory at address 0xdeadbeef
2497 /* In some OSs, the shared library list is the same/global/shared
2498 across inferiors. If code is shared between processes, so are
2499 memory regions and features. */
2500 if (!gdbarch_has_global_solist (target_gdbarch
))
2502 no_shared_libraries (NULL
, from_tty
);
2504 invalidate_target_mem_regions ();
2506 target_clear_description ();
2509 agent_capability_invalidate ();
2512 /* Callback for iterate_over_inferiors. Gets rid of the given
2516 dispose_inferior (struct inferior
*inf
, void *args
)
2518 struct thread_info
*thread
;
2520 thread
= any_thread_of_process (inf
->pid
);
2523 switch_to_thread (thread
->ptid
);
2525 /* Core inferiors actually should be detached, not killed. */
2526 if (target_has_execution
)
2529 target_detach (NULL
, 0);
2535 /* This is to be called by the open routine before it does
2539 target_preopen (int from_tty
)
2543 if (have_inferiors ())
2546 || !have_live_inferiors ()
2547 || query (_("A program is being debugged already. Kill it? ")))
2548 iterate_over_inferiors (dispose_inferior
, NULL
);
2550 error (_("Program not killed."));
2553 /* Calling target_kill may remove the target from the stack. But if
2554 it doesn't (which seems like a win for UDI), remove it now. */
2555 /* Leave the exec target, though. The user may be switching from a
2556 live process to a core of the same program. */
2557 pop_all_targets_above (file_stratum
, 0);
2559 target_pre_inferior (from_tty
);
2562 /* Detach a target after doing deferred register stores. */
2565 target_detach (char *args
, int from_tty
)
2567 struct target_ops
* t
;
2569 if (gdbarch_has_global_breakpoints (target_gdbarch
))
2570 /* Don't remove global breakpoints here. They're removed on
2571 disconnection from the target. */
2574 /* If we're in breakpoints-always-inserted mode, have to remove
2575 them before detaching. */
2576 remove_breakpoints_pid (PIDGET (inferior_ptid
));
2578 prepare_for_detach ();
2580 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2582 if (t
->to_detach
!= NULL
)
2584 t
->to_detach (t
, args
, from_tty
);
2586 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2592 internal_error (__FILE__
, __LINE__
, _("could not find a target to detach"));
2596 target_disconnect (char *args
, int from_tty
)
2598 struct target_ops
*t
;
2600 /* If we're in breakpoints-always-inserted mode or if breakpoints
2601 are global across processes, we have to remove them before
2603 remove_breakpoints ();
2605 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2606 if (t
->to_disconnect
!= NULL
)
2609 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2611 t
->to_disconnect (t
, args
, from_tty
);
2619 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2621 struct target_ops
*t
;
2623 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2625 if (t
->to_wait
!= NULL
)
2627 ptid_t retval
= (*t
->to_wait
) (t
, ptid
, status
, options
);
2631 char *status_string
;
2633 status_string
= target_waitstatus_to_string (status
);
2634 fprintf_unfiltered (gdb_stdlog
,
2635 "target_wait (%d, status) = %d, %s\n",
2636 PIDGET (ptid
), PIDGET (retval
),
2638 xfree (status_string
);
2649 target_pid_to_str (ptid_t ptid
)
2651 struct target_ops
*t
;
2653 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2655 if (t
->to_pid_to_str
!= NULL
)
2656 return (*t
->to_pid_to_str
) (t
, ptid
);
2659 return normal_pid_to_str (ptid
);
2663 target_thread_name (struct thread_info
*info
)
2665 struct target_ops
*t
;
2667 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2669 if (t
->to_thread_name
!= NULL
)
2670 return (*t
->to_thread_name
) (info
);
2677 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2679 struct target_ops
*t
;
2681 target_dcache_invalidate ();
2683 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2685 if (t
->to_resume
!= NULL
)
2687 t
->to_resume (t
, ptid
, step
, signal
);
2689 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2691 step
? "step" : "continue",
2692 gdb_signal_to_name (signal
));
2694 registers_changed_ptid (ptid
);
2695 set_executing (ptid
, 1);
2696 set_running (ptid
, 1);
2697 clear_inline_frame_state (ptid
);
2706 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2708 struct target_ops
*t
;
2710 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2712 if (t
->to_pass_signals
!= NULL
)
2718 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2721 for (i
= 0; i
< numsigs
; i
++)
2722 if (pass_signals
[i
])
2723 fprintf_unfiltered (gdb_stdlog
, " %s",
2724 gdb_signal_to_name (i
));
2726 fprintf_unfiltered (gdb_stdlog
, " })\n");
2729 (*t
->to_pass_signals
) (numsigs
, pass_signals
);
2736 target_program_signals (int numsigs
, unsigned char *program_signals
)
2738 struct target_ops
*t
;
2740 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2742 if (t
->to_program_signals
!= NULL
)
2748 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2751 for (i
= 0; i
< numsigs
; i
++)
2752 if (program_signals
[i
])
2753 fprintf_unfiltered (gdb_stdlog
, " %s",
2754 gdb_signal_to_name (i
));
2756 fprintf_unfiltered (gdb_stdlog
, " })\n");
2759 (*t
->to_program_signals
) (numsigs
, program_signals
);
2765 /* Look through the list of possible targets for a target that can
2769 target_follow_fork (int follow_child
)
2771 struct target_ops
*t
;
2773 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2775 if (t
->to_follow_fork
!= NULL
)
2777 int retval
= t
->to_follow_fork (t
, follow_child
);
2780 fprintf_unfiltered (gdb_stdlog
, "target_follow_fork (%d) = %d\n",
2781 follow_child
, retval
);
2786 /* Some target returned a fork event, but did not know how to follow it. */
2787 internal_error (__FILE__
, __LINE__
,
2788 _("could not find a target to follow fork"));
2792 target_mourn_inferior (void)
2794 struct target_ops
*t
;
2796 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2798 if (t
->to_mourn_inferior
!= NULL
)
2800 t
->to_mourn_inferior (t
);
2802 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2804 /* We no longer need to keep handles on any of the object files.
2805 Make sure to release them to avoid unnecessarily locking any
2806 of them while we're not actually debugging. */
2807 bfd_cache_close_all ();
  internal_error (__FILE__, __LINE__,
                  _("could not find a target to mourn the inferior"));
2817 /* Look for a target which can describe architectural features, starting
2818 from TARGET. If we find one, return its description. */
2820 const struct target_desc
*
2821 target_read_description (struct target_ops
*target
)
2823 struct target_ops
*t
;
2825 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2826 if (t
->to_read_description
!= NULL
)
2828 const struct target_desc
*tdesc
;
2830 tdesc
= t
->to_read_description (t
);
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on the
   target side with, for example, gdbserver).  */

simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
      warning (_("Unable to access target memory at %s, halting search."),
               hex_string (start_addr));
      do_cleanups (old_cleanups);

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
              warning (_("Unable to access target "
                         "memory at %s, halting search."),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);

          start_addr += chunk_size;

  do_cleanups (old_cleanups);
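/* Illustrative sketch, not built: the same memmem-based scan as above, run
   over a buffer already in the host's memory, showing how the returned
   pointer is converted back into a target address.  The function name is
   made up for the example.  */
#if 0
static int
search_host_buffer_example (CORE_ADDR start_addr,
                            const gdb_byte *haystack, size_t haystack_len,
                            const gdb_byte *pattern, size_t pattern_len,
                            CORE_ADDR *found_addrp)
{
  const gdb_byte *found_ptr = memmem (haystack, haystack_len,
                                      pattern, pattern_len);

  if (found_ptr == NULL)
    return 0;

  /* The offset of the match within the buffer is also its offset from
     the address the buffer was read from.  */
  *found_addrp = start_addr + (found_ptr - haystack);
  return 1;
}
#endif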
2944 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2945 sequence of bytes in PATTERN with length PATTERN_LEN.
2947 The result is 1 if found, 0 if not found, and -1 if there was an error
2948 requiring halting of the search (e.g. memory read error).
2949 If the pattern is found the address is recorded in FOUND_ADDRP. */
2952 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2953 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2954 CORE_ADDR
*found_addrp
)
2956 struct target_ops
*t
;
2959 /* We don't use INHERIT to set current_target.to_search_memory,
2960 so we have to scan the target stack and handle targetdebug
2964 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2965 hex_string (start_addr
));
2967 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2968 if (t
->to_search_memory
!= NULL
)
2973 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2974 pattern
, pattern_len
, found_addrp
);
2978 /* If a special version of to_search_memory isn't available, use the
2980 found
= simple_search_memory (current_target
.beneath
,
2981 start_addr
, search_space_len
,
2982 pattern
, pattern_len
, found_addrp
);
2986 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2991 /* Look through the currently pushed targets. If none of them will
2992 be able to restart the currently running process, issue an error
2996 target_require_runnable (void)
2998 struct target_ops
*t
;
3000 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
3002 /* If this target knows how to create a new program, then
3003 assume we will still be able to after killing the current
3004 one. Either killing and mourning will not pop T, or else
3005 find_default_run_target will find it again. */
3006 if (t
->to_create_inferior
!= NULL
)
3009 /* Do not worry about thread_stratum targets that can not
3010 create inferiors. Assume they will be pushed again if
3011 necessary, and continue to the process_stratum. */
3012 if (t
->to_stratum
== thread_stratum
3013 || t
->to_stratum
== arch_stratum
)
3016 error (_("The \"%s\" target does not support \"run\". "
3017 "Try \"help target\" or \"continue\"."),
3021 /* This function is only called if the target is running. In that
3022 case there should have been a process_stratum target and it
3023 should either know how to create inferiors, or not... */
3024 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3027 /* Look through the list of possible targets for a target that can
3028 execute a run or attach command without any other data. This is
3029 used to locate the default process stratum.
3031 If DO_MESG is not NULL, the result is always valid (error() is
3032 called for errors); else, return NULL on error. */
3034 static struct target_ops
*
3035 find_default_run_target (char *do_mesg
)
3037 struct target_ops
**t
;
3038 struct target_ops
*runable
= NULL
;
3043 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3046 if ((*t
)->to_can_run
&& target_can_run (*t
))
3056 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3065 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3067 struct target_ops
*t
;
3069 t
= find_default_run_target ("attach");
3070 (t
->to_attach
) (t
, args
, from_tty
);
3075 find_default_create_inferior (struct target_ops
*ops
,
3076 char *exec_file
, char *allargs
, char **env
,
3079 struct target_ops
*t
;
3081 t
= find_default_run_target ("run");
3082 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3087 find_default_can_async_p (void)
3089 struct target_ops
*t
;
3091 /* This may be called before the target is pushed on the stack;
3092 look for the default process stratum. If there's none, gdb isn't
3093 configured with a native debugger, and target remote isn't
3095 t
= find_default_run_target (NULL
);
3096 if (t
&& t
->to_can_async_p
)
3097 return (t
->to_can_async_p
) ();
3102 find_default_is_async_p (void)
3104 struct target_ops
*t
;
3106 /* This may be called before the target is pushed on the stack;
3107 look for the default process stratum. If there's none, gdb isn't
3108 configured with a native debugger, and target remote isn't
3110 t
= find_default_run_target (NULL
);
3111 if (t
&& t
->to_is_async_p
)
3112 return (t
->to_is_async_p
) ();
3117 find_default_supports_non_stop (void)
3119 struct target_ops
*t
;
3121 t
= find_default_run_target (NULL
);
3122 if (t
&& t
->to_supports_non_stop
)
3123 return (t
->to_supports_non_stop
) ();
3128 target_supports_non_stop (void)
3130 struct target_ops
*t
;
  for (t = &current_target; t != NULL; t = t->beneath)
3133 if (t
->to_supports_non_stop
)
3134 return t
->to_supports_non_stop ();
3139 /* Implement the "info proc" command. */
3142 target_info_proc (char *args
, enum info_proc_what what
)
3144 struct target_ops
*t
;
3146 /* If we're already connected to something that can get us OS
3147 related data, use it. Otherwise, try using the native
3149 if (current_target
.to_stratum
>= process_stratum
)
3150 t
= current_target
.beneath
;
3152 t
= find_default_run_target (NULL
);
3154 for (; t
!= NULL
; t
= t
->beneath
)
3156 if (t
->to_info_proc
!= NULL
)
3158 t
->to_info_proc (t
, args
, what
);
3161 fprintf_unfiltered (gdb_stdlog
,
3162 "target_info_proc (\"%s\", %d)\n", args
, what
);
3168 error (_("Not supported on this target."));
3172 find_default_supports_disable_randomization (void)
3174 struct target_ops
*t
;
3176 t
= find_default_run_target (NULL
);
3177 if (t
&& t
->to_supports_disable_randomization
)
3178 return (t
->to_supports_disable_randomization
) ();
3183 target_supports_disable_randomization (void)
3185 struct target_ops
*t
;
  for (t = &current_target; t != NULL; t = t->beneath)
3188 if (t
->to_supports_disable_randomization
)
3189 return t
->to_supports_disable_randomization ();
3195 target_get_osdata (const char *type
)
3197 struct target_ops
*t
;
3199 /* If we're already connected to something that can get us OS
3200 related data, use it. Otherwise, try using the native
3202 if (current_target
.to_stratum
>= process_stratum
)
3203 t
= current_target
.beneath
;
3205 t
= find_default_run_target ("get OS data");
3210 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3213 /* Determine the current address space of thread PTID. */
3215 struct address_space
*
3216 target_thread_address_space (ptid_t ptid
)
3218 struct address_space
*aspace
;
3219 struct inferior
*inf
;
3220 struct target_ops
*t
;
3222 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3224 if (t
->to_thread_address_space
!= NULL
)
3226 aspace
= t
->to_thread_address_space (t
, ptid
);
3227 gdb_assert (aspace
);
3230 fprintf_unfiltered (gdb_stdlog
,
3231 "target_thread_address_space (%s) = %d\n",
3232 target_pid_to_str (ptid
),
3233 address_space_num (aspace
));
3238 /* Fall-back to the "main" address space of the inferior. */
3239 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3241 if (inf
== NULL
|| inf
->aspace
== NULL
)
3242 internal_error (__FILE__
, __LINE__
,
3243 _("Can't determine the current "
3244 "address space of thread %s\n"),
3245 target_pid_to_str (ptid
));
3251 /* Target file operations. */
3253 static struct target_ops
*
3254 default_fileio_target (void)
3256 /* If we're already connected to something that can perform
3257 file I/O, use it. Otherwise, try using the native target. */
3258 if (current_target
.to_stratum
>= process_stratum
)
3259 return current_target
.beneath
;
3261 return find_default_run_target ("file I/O");
3264 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3265 target file descriptor, or -1 if an error occurs (and set
3268 target_fileio_open (const char *filename
, int flags
, int mode
,
3271 struct target_ops
*t
;
3273 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3275 if (t
->to_fileio_open
!= NULL
)
3277 int fd
= t
->to_fileio_open (filename
, flags
, mode
, target_errno
);
3280 fprintf_unfiltered (gdb_stdlog
,
3281 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3282 filename
, flags
, mode
,
3283 fd
, fd
!= -1 ? 0 : *target_errno
);
3288 *target_errno
= FILEIO_ENOSYS
;
3292 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3293 Return the number of bytes written, or -1 if an error occurs
3294 (and set *TARGET_ERRNO). */
3296 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3297 ULONGEST offset
, int *target_errno
)
3299 struct target_ops
*t
;
3301 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3303 if (t
->to_fileio_pwrite
!= NULL
)
3305 int ret
= t
->to_fileio_pwrite (fd
, write_buf
, len
, offset
,
3309 fprintf_unfiltered (gdb_stdlog
,
3310 "target_fileio_pwrite (%d,...,%d,%s) "
3312 fd
, len
, pulongest (offset
),
3313 ret
, ret
!= -1 ? 0 : *target_errno
);
3318 *target_errno
= FILEIO_ENOSYS
;
/* Read up to LEN bytes from FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
3326 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3327 ULONGEST offset
, int *target_errno
)
3329 struct target_ops
*t
;
3331 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3333 if (t
->to_fileio_pread
!= NULL
)
3335 int ret
= t
->to_fileio_pread (fd
, read_buf
, len
, offset
,
3339 fprintf_unfiltered (gdb_stdlog
,
3340 "target_fileio_pread (%d,...,%d,%s) "
3342 fd
, len
, pulongest (offset
),
3343 ret
, ret
!= -1 ? 0 : *target_errno
);
3348 *target_errno
= FILEIO_ENOSYS
;
3352 /* Close FD on the target. Return 0, or -1 if an error occurs
3353 (and set *TARGET_ERRNO). */
3355 target_fileio_close (int fd
, int *target_errno
)
3357 struct target_ops
*t
;
3359 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3361 if (t
->to_fileio_close
!= NULL
)
3363 int ret
= t
->to_fileio_close (fd
, target_errno
);
3366 fprintf_unfiltered (gdb_stdlog
,
3367 "target_fileio_close (%d) = %d (%d)\n",
3368 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3373 *target_errno
= FILEIO_ENOSYS
;
3377 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3378 occurs (and set *TARGET_ERRNO). */
3380 target_fileio_unlink (const char *filename
, int *target_errno
)
3382 struct target_ops
*t
;
3384 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3386 if (t
->to_fileio_unlink
!= NULL
)
3388 int ret
= t
->to_fileio_unlink (filename
, target_errno
);
3391 fprintf_unfiltered (gdb_stdlog
,
3392 "target_fileio_unlink (%s) = %d (%d)\n",
3393 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3398 *target_errno
= FILEIO_ENOSYS
;
3402 /* Read value of symbolic link FILENAME on the target. Return a
3403 null-terminated string allocated via xmalloc, or NULL if an error
3404 occurs (and set *TARGET_ERRNO). */
3406 target_fileio_readlink (const char *filename
, int *target_errno
)
3408 struct target_ops
*t
;
3410 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3412 if (t
->to_fileio_readlink
!= NULL
)
3414 char *ret
= t
->to_fileio_readlink (filename
, target_errno
);
3417 fprintf_unfiltered (gdb_stdlog
,
3418 "target_fileio_readlink (%s) = %s (%d)\n",
3419 filename
, ret
? ret
: "(nil)",
3420 ret
? 0 : *target_errno
);
3425 *target_errno
= FILEIO_ENOSYS
;
3430 target_fileio_close_cleanup (void *opaque
)
3432 int fd
= *(int *) opaque
;
3435 target_fileio_close (fd
, &target_errno
);
3438 /* Read target file FILENAME. Store the result in *BUF_P and
3439 return the size of the transferred data. PADDING additional bytes are
3440 available in *BUF_P. This is a helper function for
3441 target_fileio_read_alloc; see the declaration of that function for more
3445 target_fileio_read_alloc_1 (const char *filename
,
3446 gdb_byte
**buf_p
, int padding
)
3448 struct cleanup
*close_cleanup
;
3449 size_t buf_alloc
, buf_pos
;
3455 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3459 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3461 /* Start by reading up to 4K at a time. The target will throttle
3462 this number down if necessary. */
3464 buf
= xmalloc (buf_alloc
);
3468 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3469 buf_alloc
- buf_pos
- padding
, buf_pos
,
3473 /* An error occurred. */
3474 do_cleanups (close_cleanup
);
3480 /* Read all there was. */
3481 do_cleanups (close_cleanup
);
3491 /* If the buffer is filling up, expand it. */
3492 if (buf_alloc
< buf_pos
* 2)
3495 buf
= xrealloc (buf
, buf_alloc
);
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
  return target_fileio_read_alloc_1 (filename, buf_p, 0);

/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

target_fileio_read_stralloc (const char *filename)
  LONGEST i, transferred;

  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  buffer[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (buffer); i < transferred; i++)
      warning (_("target file %s "
                 "contained unexpected null characters"),
               filename);

  return (char *) buffer;
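/* Illustrative sketch, not built: the open/pread/close calling sequence of
   the target fileio layer, reading the first BUF_LEN bytes of a file that
   lives on the target.  The file name is only an example; on failure the
   FILEIO_* code is left in target_errno by the calls above.  */
#if 0
static int
peek_target_file_example (gdb_byte *buf, int buf_len)
{
  int target_errno;
  int fd = target_fileio_open ("/etc/hostname" /* example */,
                               FILEIO_O_RDONLY, 0, &target_errno);
  int n;

  if (fd < 0)
    return -1;

  n = target_fileio_pread (fd, buf, buf_len, 0 /* offset */, &target_errno);
  target_fileio_close (fd, &target_errno);
  return n;
}
#endif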
3549 default_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
3551 return (len
<= gdbarch_ptr_bit (target_gdbarch
) / TARGET_CHAR_BIT
);
3555 default_watchpoint_addr_within_range (struct target_ops
*target
,
3557 CORE_ADDR start
, int length
)
3559 return addr
>= start
&& addr
< start
+ length
;
3562 static struct gdbarch
*
3563 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3565 return target_gdbarch
;
3581 return_minus_one (void)
3586 /* Find a single runnable target in the stack and return it. If for
3587 some reason there is more than one, return NULL. */
3590 find_run_target (void)
3592 struct target_ops
**t
;
3593 struct target_ops
*runable
= NULL
;
3598 for (t
= target_structs
; t
< target_structs
+ target_struct_size
; ++t
)
3600 if ((*t
)->to_can_run
&& target_can_run (*t
))
3607 return (count
== 1 ? runable
: NULL
);
3611 * Find the next target down the stack from the specified target.
3615 find_target_beneath (struct target_ops
*t
)
3621 /* The inferior process has died. Long live the inferior! */
3624 generic_mourn_inferior (void)
3628 ptid
= inferior_ptid
;
3629 inferior_ptid
= null_ptid
;
3631 /* Mark breakpoints uninserted in case something tries to delete a
3632 breakpoint while we delete the inferior's threads (which would
3633 fail, since the inferior is long gone). */
3634 mark_breakpoints_out ();
3636 if (!ptid_equal (ptid
, null_ptid
))
3638 int pid
= ptid_get_pid (ptid
);
3639 exit_inferior (pid
);
3642 /* Note this wipes step-resume breakpoints, so needs to be done
3643 after exit_inferior, which ends up referencing the step-resume
3644 breakpoints through clear_thread_inferior_resources. */
3645 breakpoint_init_inferior (inf_exited
);
3647 registers_changed ();
3649 reopen_exec_file ();
3650 reinit_frame_cache ();
3652 if (deprecated_detach_hook
)
3653 deprecated_detach_hook ();
3656 /* Convert a normal process ID to a string. Returns the string in a
3660 normal_pid_to_str (ptid_t ptid
)
3662 static char buf
[32];
3664 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3669 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3671 return normal_pid_to_str (ptid
);
3674 /* Error-catcher for target_find_memory_regions. */
3676 dummy_find_memory_regions (find_memory_region_ftype ignore1
, void *ignore2
)
3678 error (_("Command not implemented for this target."));
3682 /* Error-catcher for target_make_corefile_notes. */
3684 dummy_make_corefile_notes (bfd
*ignore1
, int *ignore2
)
3686 error (_("Command not implemented for this target."));
3690 /* Error-catcher for target_get_bookmark. */
3692 dummy_get_bookmark (char *ignore1
, int ignore2
)
3698 /* Error-catcher for target_goto_bookmark. */
3700 dummy_goto_bookmark (gdb_byte
*ignore
, int from_tty
)
3705 /* Set up the handful of non-empty slots needed by the dummy target
3709 init_dummy_target (void)
3711 dummy_target
.to_shortname
= "None";
3712 dummy_target
.to_longname
= "None";
3713 dummy_target
.to_doc
= "";
3714 dummy_target
.to_attach
= find_default_attach
;
3715 dummy_target
.to_detach
=
3716 (void (*)(struct target_ops
*, char *, int))target_ignore
;
3717 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3718 dummy_target
.to_can_async_p
= find_default_can_async_p
;
3719 dummy_target
.to_is_async_p
= find_default_is_async_p
;
3720 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3721 dummy_target
.to_supports_disable_randomization
3722 = find_default_supports_disable_randomization
;
3723 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3724 dummy_target
.to_stratum
= dummy_stratum
;
3725 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3726 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3727 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3728 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3729 dummy_target
.to_xfer_partial
= default_xfer_partial
;
3730 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3731 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3732 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3733 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3734 dummy_target
.to_has_execution
3735 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3736 dummy_target
.to_stopped_by_watchpoint
= return_zero
;
3737 dummy_target
.to_stopped_data_address
=
3738 (int (*) (struct target_ops
*, CORE_ADDR
*)) return_zero
;
3739 dummy_target
.to_magic
= OPS_MAGIC
;
3743 debug_to_open (char *args
, int from_tty
)
3745 debug_target
.to_open (args
, from_tty
);
3747 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3751 target_close (struct target_ops
*targ
, int quitting
)
3753 if (targ
->to_xclose
!= NULL
)
3754 targ
->to_xclose (targ
, quitting
);
3755 else if (targ
->to_close
!= NULL
)
3756 targ
->to_close (quitting
);
3759 fprintf_unfiltered (gdb_stdlog
, "target_close (%d)\n", quitting
);
3763 target_attach (char *args
, int from_tty
)
3765 struct target_ops
*t
;
3767 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3769 if (t
->to_attach
!= NULL
)
3771 t
->to_attach (t
, args
, from_tty
);
3773 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3779 internal_error (__FILE__
, __LINE__
,
3780 _("could not find a target to attach"));
3784 target_thread_alive (ptid_t ptid
)
3786 struct target_ops
*t
;
3788 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3790 if (t
->to_thread_alive
!= NULL
)
3794 retval
= t
->to_thread_alive (t
, ptid
);
3796 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3797 PIDGET (ptid
), retval
);
3807 target_find_new_threads (void)
3809 struct target_ops
*t
;
3811 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3813 if (t
->to_find_new_threads
!= NULL
)
3815 t
->to_find_new_threads (t
);
3817 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3825 target_stop (ptid_t ptid
)
3829 warning (_("May not interrupt or stop the target, ignoring attempt"));
3833 (*current_target
.to_stop
) (ptid
);
3837 debug_to_post_attach (int pid
)
3839 debug_target
.to_post_attach (pid
);
3841 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3844 /* Return a pretty printed form of target_waitstatus.
3845 Space for the result is malloc'd, caller must free. */
3848 target_waitstatus_to_string (const struct target_waitstatus
*ws
)
3850 const char *kind_str
= "status->kind = ";
3854 case TARGET_WAITKIND_EXITED
:
3855 return xstrprintf ("%sexited, status = %d",
3856 kind_str
, ws
->value
.integer
);
3857 case TARGET_WAITKIND_STOPPED
:
3858 return xstrprintf ("%sstopped, signal = %s",
3859 kind_str
, gdb_signal_to_name (ws
->value
.sig
));
3860 case TARGET_WAITKIND_SIGNALLED
:
3861 return xstrprintf ("%ssignalled, signal = %s",
3862 kind_str
, gdb_signal_to_name (ws
->value
.sig
));
3863 case TARGET_WAITKIND_LOADED
:
3864 return xstrprintf ("%sloaded", kind_str
);
3865 case TARGET_WAITKIND_FORKED
:
3866 return xstrprintf ("%sforked", kind_str
);
3867 case TARGET_WAITKIND_VFORKED
:
3868 return xstrprintf ("%svforked", kind_str
);
3869 case TARGET_WAITKIND_EXECD
:
3870 return xstrprintf ("%sexecd", kind_str
);
3871 case TARGET_WAITKIND_SYSCALL_ENTRY
:
3872 return xstrprintf ("%sentered syscall", kind_str
);
3873 case TARGET_WAITKIND_SYSCALL_RETURN
:
3874 return xstrprintf ("%sexited syscall", kind_str
);
3875 case TARGET_WAITKIND_SPURIOUS
:
3876 return xstrprintf ("%sspurious", kind_str
);
3877 case TARGET_WAITKIND_IGNORE
:
3878 return xstrprintf ("%signore", kind_str
);
3879 case TARGET_WAITKIND_NO_HISTORY
:
3880 return xstrprintf ("%sno-history", kind_str
);
3881 case TARGET_WAITKIND_NO_RESUMED
:
3882 return xstrprintf ("%sno-resumed", kind_str
);
3884 return xstrprintf ("%sunknown???", kind_str
);
3889 debug_print_register (const char * func
,
3890 struct regcache
*regcache
, int regno
)
3892 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3894 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3895 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3896 && gdbarch_register_name (gdbarch
, regno
) != NULL
3897 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3898 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3899 gdbarch_register_name (gdbarch
, regno
));
3901 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3902 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3904 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3905 int i
, size
= register_size (gdbarch
, regno
);
3906 unsigned char buf
[MAX_REGISTER_SIZE
];
3908 regcache_raw_collect (regcache
, regno
, buf
);
3909 fprintf_unfiltered (gdb_stdlog
, " = ");
3910 for (i
= 0; i
< size
; i
++)
3912 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3914 if (size
<= sizeof (LONGEST
))
3916 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3918 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3919 core_addr_to_string_nz (val
), plongest (val
));
3922 fprintf_unfiltered (gdb_stdlog
, "\n");
3926 target_fetch_registers (struct regcache
*regcache
, int regno
)
3928 struct target_ops
*t
;
3930 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3932 if (t
->to_fetch_registers
!= NULL
)
3934 t
->to_fetch_registers (t
, regcache
, regno
);
3936 debug_print_register ("target_fetch_registers", regcache
, regno
);
3943 target_store_registers (struct regcache
*regcache
, int regno
)
3945 struct target_ops
*t
;
3947 if (!may_write_registers
)
3948 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3950 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3952 if (t
->to_store_registers
!= NULL
)
3954 t
->to_store_registers (t
, regcache
, regno
);
3957 debug_print_register ("target_store_registers", regcache
, regno
);
3967 target_core_of_thread (ptid_t ptid
)
3969 struct target_ops
*t
;
3971 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3973 if (t
->to_core_of_thread
!= NULL
)
3975 int retval
= t
->to_core_of_thread (t
, ptid
);
3978 fprintf_unfiltered (gdb_stdlog
,
3979 "target_core_of_thread (%d) = %d\n",
3980 PIDGET (ptid
), retval
);
3989 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3991 struct target_ops
*t
;
3993 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3995 if (t
->to_verify_memory
!= NULL
)
3997 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
4000 fprintf_unfiltered (gdb_stdlog
,
4001 "target_verify_memory (%s, %s) = %d\n",
4002 paddress (target_gdbarch
, memaddr
),
4012 /* The documentation for this function is in its prototype declaration in
4016 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4018 struct target_ops
*t
;
4020 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4021 if (t
->to_insert_mask_watchpoint
!= NULL
)
4025 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
4028 fprintf_unfiltered (gdb_stdlog
, "\
4029 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4030 core_addr_to_string (addr
),
4031 core_addr_to_string (mask
), rw
, ret
);
4039 /* The documentation for this function is in its prototype declaration in
4043 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4045 struct target_ops
*t
;
4047 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4048 if (t
->to_remove_mask_watchpoint
!= NULL
)
4052 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4055 fprintf_unfiltered (gdb_stdlog
, "\
4056 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4057 core_addr_to_string (addr
),
4058 core_addr_to_string (mask
), rw
, ret
);
4066 /* The documentation for this function is in its prototype declaration
4070 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4072 struct target_ops
*t
;
4074 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4075 if (t
->to_masked_watch_num_registers
!= NULL
)
4076 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4081 /* The documentation for this function is in its prototype declaration
4085 target_ranged_break_num_registers (void)
4087 struct target_ops
*t
;
4089 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4090 if (t
->to_ranged_break_num_registers
!= NULL
)
4091 return t
->to_ranged_break_num_registers (t
);
4097 debug_to_prepare_to_store (struct regcache
*regcache
)
4099 debug_target
.to_prepare_to_store (regcache
);
4101 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4105 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4106 int write
, struct mem_attrib
*attrib
,
4107 struct target_ops
*target
)
4111 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4114 fprintf_unfiltered (gdb_stdlog
,
4115 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4116 paddress (target_gdbarch
, memaddr
), len
,
4117 write
? "write" : "read", retval
);
4123 fputs_unfiltered (", bytes =", gdb_stdlog
);
4124 for (i
= 0; i
< retval
; i
++)
4126 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4128 if (targetdebug
< 2 && i
> 0)
4130 fprintf_unfiltered (gdb_stdlog
, " ...");
4133 fprintf_unfiltered (gdb_stdlog
, "\n");
4136 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4140 fputc_unfiltered ('\n', gdb_stdlog
);
4146 debug_to_files_info (struct target_ops
*target
)
4148 debug_target
.to_files_info (target
);
4150 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4154 debug_to_insert_breakpoint (struct gdbarch
*gdbarch
,
4155 struct bp_target_info
*bp_tgt
)
4159 retval
= debug_target
.to_insert_breakpoint (gdbarch
, bp_tgt
);
4161 fprintf_unfiltered (gdb_stdlog
,
4162 "target_insert_breakpoint (%s, xxx) = %ld\n",
4163 core_addr_to_string (bp_tgt
->placed_address
),
4164 (unsigned long) retval
);
4169 debug_to_remove_breakpoint (struct gdbarch
*gdbarch
,
4170 struct bp_target_info
*bp_tgt
)
4174 retval
= debug_target
.to_remove_breakpoint (gdbarch
, bp_tgt
);
4176 fprintf_unfiltered (gdb_stdlog
,
4177 "target_remove_breakpoint (%s, xxx) = %ld\n",
4178 core_addr_to_string (bp_tgt
->placed_address
),
4179 (unsigned long) retval
);
4184 debug_to_can_use_hw_breakpoint (int type
, int cnt
, int from_tty
)
4188 retval
= debug_target
.to_can_use_hw_breakpoint (type
, cnt
, from_tty
);
4190 fprintf_unfiltered (gdb_stdlog
,
4191 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4192 (unsigned long) type
,
4193 (unsigned long) cnt
,
4194 (unsigned long) from_tty
,
4195 (unsigned long) retval
);
4200 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
4204 retval
= debug_target
.to_region_ok_for_hw_watchpoint (addr
, len
);
4206 fprintf_unfiltered (gdb_stdlog
,
4207 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4208 core_addr_to_string (addr
), (unsigned long) len
,
4209 core_addr_to_string (retval
));
4214 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr
, int len
, int rw
,
4215 struct expression
*cond
)
4219 retval
= debug_target
.to_can_accel_watchpoint_condition (addr
, len
,
4222 fprintf_unfiltered (gdb_stdlog
,
4223 "target_can_accel_watchpoint_condition "
4224 "(%s, %d, %d, %s) = %ld\n",
4225 core_addr_to_string (addr
), len
, rw
,
4226 host_address_to_string (cond
), (unsigned long) retval
);
4231 debug_to_stopped_by_watchpoint (void)
4235 retval
= debug_target
.to_stopped_by_watchpoint ();
4237 fprintf_unfiltered (gdb_stdlog
,
4238 "target_stopped_by_watchpoint () = %ld\n",
4239 (unsigned long) retval
);
4244 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4248 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4250 fprintf_unfiltered (gdb_stdlog
,
4251 "target_stopped_data_address ([%s]) = %ld\n",
4252 core_addr_to_string (*addr
),
4253 (unsigned long)retval
);
4258 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4260 CORE_ADDR start
, int length
)
4264 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4267 fprintf_filtered (gdb_stdlog
,
4268 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4269 core_addr_to_string (addr
), core_addr_to_string (start
),
4275 debug_to_insert_hw_breakpoint (struct gdbarch
*gdbarch
,
4276 struct bp_target_info
*bp_tgt
)
4280 retval
= debug_target
.to_insert_hw_breakpoint (gdbarch
, bp_tgt
);
4282 fprintf_unfiltered (gdb_stdlog
,
4283 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4284 core_addr_to_string (bp_tgt
->placed_address
),
4285 (unsigned long) retval
);
4290 debug_to_remove_hw_breakpoint (struct gdbarch
*gdbarch
,
4291 struct bp_target_info
*bp_tgt
)
4295 retval
= debug_target
.to_remove_hw_breakpoint (gdbarch
, bp_tgt
);
4297 fprintf_unfiltered (gdb_stdlog
,
4298 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4299 core_addr_to_string (bp_tgt
->placed_address
),
4300 (unsigned long) retval
);
4305 debug_to_insert_watchpoint (CORE_ADDR addr
, int len
, int type
,
4306 struct expression
*cond
)
4310 retval
= debug_target
.to_insert_watchpoint (addr
, len
, type
, cond
);
4312 fprintf_unfiltered (gdb_stdlog
,
4313 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4314 core_addr_to_string (addr
), len
, type
,
4315 host_address_to_string (cond
), (unsigned long) retval
);
4320 debug_to_remove_watchpoint (CORE_ADDR addr
, int len
, int type
,
4321 struct expression
*cond
)
4325 retval
= debug_target
.to_remove_watchpoint (addr
, len
, type
, cond
);
4327 fprintf_unfiltered (gdb_stdlog
,
4328 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4329 core_addr_to_string (addr
), len
, type
,
4330 host_address_to_string (cond
), (unsigned long) retval
);
4335 debug_to_terminal_init (void)
4337 debug_target
.to_terminal_init ();
4339 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4343 debug_to_terminal_inferior (void)
4345 debug_target
.to_terminal_inferior ();
4347 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4351 debug_to_terminal_ours_for_output (void)
4353 debug_target
.to_terminal_ours_for_output ();
4355 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4359 debug_to_terminal_ours (void)
4361 debug_target
.to_terminal_ours ();
4363 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4367 debug_to_terminal_save_ours (void)
4369 debug_target
.to_terminal_save_ours ();
4371 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4375 debug_to_terminal_info (char *arg
, int from_tty
)
4377 debug_target
.to_terminal_info (arg
, from_tty
);
4379 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4384 debug_to_load (char *args
, int from_tty
)
4386 debug_target
.to_load (args
, from_tty
);
4388 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4392 debug_to_post_startup_inferior (ptid_t ptid
)
4394 debug_target
.to_post_startup_inferior (ptid
);
4396 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4401 debug_to_insert_fork_catchpoint (int pid
)
4405 retval
= debug_target
.to_insert_fork_catchpoint (pid
);
4407 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4414 debug_to_remove_fork_catchpoint (int pid
)
4418 retval
= debug_target
.to_remove_fork_catchpoint (pid
);
4420 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4427 debug_to_insert_vfork_catchpoint (int pid
)
4431 retval
= debug_target
.to_insert_vfork_catchpoint (pid
);
4433 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4440 debug_to_remove_vfork_catchpoint (int pid
)
4444 retval
= debug_target
.to_remove_vfork_catchpoint (pid
);
4446 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4453 debug_to_insert_exec_catchpoint (int pid
)
4457 retval
= debug_target
.to_insert_exec_catchpoint (pid
);
4459 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4466 debug_to_remove_exec_catchpoint (int pid
)
4470 retval
= debug_target
.to_remove_exec_catchpoint (pid
);
4472 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4479 debug_to_has_exited (int pid
, int wait_status
, int *exit_status
)
4483 has_exited
= debug_target
.to_has_exited (pid
, wait_status
, exit_status
);
4485 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4486 pid
, wait_status
, *exit_status
, has_exited
);
4492 debug_to_can_run (void)
4496 retval
= debug_target
.to_can_run ();
4498 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4503 static struct gdbarch
*
4504 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4506 struct gdbarch
*retval
;
4508 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4510 fprintf_unfiltered (gdb_stdlog
,
4511 "target_thread_architecture (%s) = %s [%s]\n",
4512 target_pid_to_str (ptid
),
4513 host_address_to_string (retval
),
4514 gdbarch_bfd_arch_info (retval
)->printable_name
);
4519 debug_to_stop (ptid_t ptid
)
4521 debug_target
.to_stop (ptid
);
4523 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4524 target_pid_to_str (ptid
));
4528 debug_to_rcmd (char *command
,
4529 struct ui_file
*outbuf
)
4531 debug_target
.to_rcmd (command
, outbuf
);
4532 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4536 debug_to_pid_to_exec_file (int pid
)
4540 exec_file
= debug_target
.to_pid_to_exec_file (pid
);
4542 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4549 setup_target_debug (void)
  memcpy (&debug_target, &current_target, sizeof debug_target);
4553 current_target
.to_open
= debug_to_open
;
4554 current_target
.to_post_attach
= debug_to_post_attach
;
4555 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4556 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4557 current_target
.to_files_info
= debug_to_files_info
;
4558 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4559 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4560 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4561 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4562 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4563 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4564 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4565 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4566 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4567 current_target
.to_watchpoint_addr_within_range
4568 = debug_to_watchpoint_addr_within_range
;
4569 current_target
.to_region_ok_for_hw_watchpoint
4570 = debug_to_region_ok_for_hw_watchpoint
;
4571 current_target
.to_can_accel_watchpoint_condition
4572 = debug_to_can_accel_watchpoint_condition
;
4573 current_target
.to_terminal_init
= debug_to_terminal_init
;
4574 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4575 current_target
.to_terminal_ours_for_output
4576 = debug_to_terminal_ours_for_output
;
4577 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4578 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4579 current_target
.to_terminal_info
= debug_to_terminal_info
;
4580 current_target
.to_load
= debug_to_load
;
4581 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4582 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4583 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4584 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4585 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4586 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4587 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4588 current_target
.to_has_exited
= debug_to_has_exited
;
4589 current_target
.to_can_run
= debug_to_can_run
;
4590 current_target
.to_stop
= debug_to_stop
;
4591 current_target
.to_rcmd
= debug_to_rcmd
;
4592 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4593 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
4597 static char targ_desc
[] =
4598 "Names of targets and files being debugged.\nShows the entire \
4599 stack of targets currently in use (including the exec-file,\n\
4600 core-file, and process, if any), as well as the symbol file name.";
4603 do_monitor_command (char *cmd
,
4606 if ((current_target
.to_rcmd
4607 == (void (*) (char *, struct ui_file
*)) tcomplain
)
4608 || (current_target
.to_rcmd
== debug_to_rcmd
4609 && (debug_target
.to_rcmd
4610 == (void (*) (char *, struct ui_file
*)) tcomplain
)))
4611 error (_("\"monitor\" command not supported by this target."));
4612 target_rcmd (cmd
, gdb_stdtarg
);
4615 /* Print the name of each layers of our target stack. */
4618 maintenance_print_target_stack (char *cmd
, int from_tty
)
4620 struct target_ops
*t
;
4622 printf_filtered (_("The current target stack is:\n"));
4624 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4626 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
4630 /* Controls if async mode is permitted. */
4631 int target_async_permitted
= 0;
4633 /* The set command writes to this variable. If the inferior is
4634 executing, linux_nat_async_permitted is *not* updated. */
4635 static int target_async_permitted_1
= 0;
4638 set_target_async_command (char *args
, int from_tty
,
4639 struct cmd_list_element
*c
)
4641 if (have_live_inferiors ())
4643 target_async_permitted_1
= target_async_permitted
;
4644 error (_("Cannot change this setting while the inferior is running."));
4647 target_async_permitted
= target_async_permitted_1
;
4651 show_target_async_command (struct ui_file
*file
, int from_tty
,
4652 struct cmd_list_element
*c
,
4655 fprintf_filtered (file
,
4656 _("Controlling the inferior in "
4657 "asynchronous mode is %s.\n"), value
);
4660 /* Temporary copies of permission settings. */
4662 static int may_write_registers_1
= 1;
4663 static int may_write_memory_1
= 1;
4664 static int may_insert_breakpoints_1
= 1;
4665 static int may_insert_tracepoints_1
= 1;
4666 static int may_insert_fast_tracepoints_1
= 1;
4667 static int may_stop_1
= 1;
4669 /* Make the user-set values match the real values again. */
4672 update_target_permissions (void)
4674 may_write_registers_1
= may_write_registers
;
4675 may_write_memory_1
= may_write_memory
;
4676 may_insert_breakpoints_1
= may_insert_breakpoints
;
4677 may_insert_tracepoints_1
= may_insert_tracepoints
;
4678 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4679 may_stop_1
= may_stop
;
/* One function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (char *args, int from_tty,
                        struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (char *args, int from_tty,
                             struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
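
/* Usage sketch for the permission flags (illustrative; assumes no live
   inferior, since the setters above reject changes otherwise):

     (gdb) set may-insert-breakpoints off
     (gdb) set may-write-memory off

   With a flag off, the corresponding request is refused with an error in
   the target-vector code instead of being forwarded to the target (for
   example, may_write_memory is checked before memory writes are passed
   down).  */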
void
initialize_targets (void)
{
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);
  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
                            NULL,
                            show_targetdebug,
                            &setdebuglist, &showdebuglist);
  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
                           &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
                           NULL,
                           show_trust_readonly,
                           &setlist, &showlist);
  add_com ("monitor", class_obscure, do_monitor_command,
           _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);
  add_setshow_boolean_cmd ("target-async", no_class,
                           &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
                           set_target_async_command,
                           show_target_async_command,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("stack-cache", class_support,
                           &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions.  This improves remote performance significantly.\n\
By default, caching for stack access is on."),
                           set_stack_cache_enabled_p,
                           show_stack_cache_enabled_p,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-write-registers", class_support,
                           &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-write-memory", class_support,
                           &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_write_memory_permission, NULL,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
                           &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
                           &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
                           &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);
  add_setshow_boolean_cmd ("may-interrupt", class_support,
                           &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);
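
  /* Descriptive note (added for clarity): target_dcache below backs the
     "stack-cache" setting registered above.  dcache_init just allocates
     an empty cache; the memory-transfer code in this file consults it
     for stack accesses when stack caching is enabled.  */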
  target_dcache = dcache_init ();
}