05b2e4b487bd5c9052a90d9559073d04f500335e
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void tcomplain (void) ATTRIBUTE_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (struct target_ops *self);
136
137 static void debug_to_terminal_inferior (struct target_ops *self);
138
139 static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141 static void debug_to_terminal_save_ours (struct target_ops *self);
142
143 static void debug_to_terminal_ours (struct target_ops *self);
144
145 static void debug_to_load (struct target_ops *self, char *, int);
146
147 static int debug_to_can_run (struct target_ops *self);
148
149 static void debug_to_stop (struct target_ops *self, ptid_t);
150
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183 memory breakpoint inserted by gdb. */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertant disruption, such as in non-stop mode. */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Non-zero if we want to see trace of target level stuff. */
204
205 static unsigned int targetdebug = 0;
/* "show debug target" callback: report the current value of the
   targetdebug setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
212
213 static void setup_target_debug (void);
214
215 /* The user just typed 'target' without the name of a target. */
216
static void
target_command (char *arg, int from_tty)
{
  /* "target" with no sub-command name: print usage instead of erroring.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
/* Return non-zero if the currently selected thread (inferior_ptid)
   has execution.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
343
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
346
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no".  NOTE(review): return_zero
     is declared with no parameters; calling it through these casted
     pointer types relies on the platform ABI tolerating the mismatch --
     long-standing GDB practice, but formally unportable.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill any remaining NULL methods with the generated delegators.  */
  install_delegators (t);
}
371
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
375
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all mandatory methods before it becomes reachable.  */
  complete_target_initialization (t);

  /* Grow the global registry of target vectors, doubling on demand.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
412
413 /* Add a possible target architecture to the list. */
414
/* Register T like add_target_with_completer, but with no command-line
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
420
421 /* See target.h. */
422
/* Register ALIAS as a deprecated alternate name for target T's
   "target <shortname>" command.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here -- presumably deprecate_cmd
     retains the pointer for the lifetime of the command; confirm.  */
  deprecate_cmd (c, alt);
}
435
436 /* Stub functions */
437
/* Deliberate no-op; used as the default implementation for optional
   target methods that may safely do nothing.  */

void
target_ignore (void)
{
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
/* Dispatch the "load" request to the current target, first discarding
   cached target memory, which the download will invalidate.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
467
/* Start a new inferior: walk the target stack from the top and hand
   EXEC_FILE, ARGS and ENV to the first target implementing
   to_create_inferior.  It is an internal error if no target does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  /* The debug trace is deliberately emitted after the call
	     returns.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
490
/* Hand terminal ownership to the inferior, unless GDB is doing a
   background resume and should keep the terminal itself.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
505
/* Default memory-transfer stub used when a target provides none: fail
   every request by handling zero bytes and setting errno to EIO.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
513
/* Raise an error naming the current target; the default for methods
   the current target cannot perform.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
/* Raise an error because there is no process to operate on.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
526
/* Default to_terminal_info method: nothing has been saved, so just say
   so.  ARGS and FROM_TTY are unused.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
533 /* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Combine the current inferior's pid with the supplied LWP and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops *self)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
569 static void
570 update_current_target (void)
571 {
572 struct target_ops *t;
573
574 /* First, reset current's contents. */
575 memset (&current_target, 0, sizeof (current_target));
576
577 /* Install the delegators. */
578 install_delegators (&current_target);
579
580 #define INHERIT(FIELD, TARGET) \
581 if (!current_target.FIELD) \
582 current_target.FIELD = (TARGET)->FIELD
583
584 for (t = target_stack; t; t = t->beneath)
585 {
586 INHERIT (to_shortname, t);
587 INHERIT (to_longname, t);
588 INHERIT (to_doc, t);
589 /* Do not inherit to_open. */
590 /* Do not inherit to_close. */
591 /* Do not inherit to_attach. */
592 INHERIT (to_post_attach, t);
593 INHERIT (to_attach_no_wait, t);
594 /* Do not inherit to_detach. */
595 /* Do not inherit to_disconnect. */
596 /* Do not inherit to_resume. */
597 /* Do not inherit to_wait. */
598 /* Do not inherit to_fetch_registers. */
599 /* Do not inherit to_store_registers. */
600 INHERIT (to_prepare_to_store, t);
601 INHERIT (deprecated_xfer_memory, t);
602 INHERIT (to_files_info, t);
603 /* Do not inherit to_insert_breakpoint. */
604 /* Do not inherit to_remove_breakpoint. */
605 INHERIT (to_can_use_hw_breakpoint, t);
606 INHERIT (to_insert_hw_breakpoint, t);
607 INHERIT (to_remove_hw_breakpoint, t);
608 /* Do not inherit to_ranged_break_num_registers. */
609 INHERIT (to_insert_watchpoint, t);
610 INHERIT (to_remove_watchpoint, t);
611 /* Do not inherit to_insert_mask_watchpoint. */
612 /* Do not inherit to_remove_mask_watchpoint. */
613 /* Do not inherit to_stopped_data_address. */
614 INHERIT (to_have_steppable_watchpoint, t);
615 INHERIT (to_have_continuable_watchpoint, t);
616 /* Do not inherit to_stopped_by_watchpoint. */
617 INHERIT (to_watchpoint_addr_within_range, t);
618 INHERIT (to_region_ok_for_hw_watchpoint, t);
619 INHERIT (to_can_accel_watchpoint_condition, t);
620 /* Do not inherit to_masked_watch_num_registers. */
621 INHERIT (to_terminal_init, t);
622 INHERIT (to_terminal_inferior, t);
623 INHERIT (to_terminal_ours_for_output, t);
624 INHERIT (to_terminal_ours, t);
625 INHERIT (to_terminal_save_ours, t);
626 INHERIT (to_terminal_info, t);
627 /* Do not inherit to_kill. */
628 INHERIT (to_load, t);
629 /* Do no inherit to_create_inferior. */
630 INHERIT (to_post_startup_inferior, t);
631 INHERIT (to_insert_fork_catchpoint, t);
632 INHERIT (to_remove_fork_catchpoint, t);
633 INHERIT (to_insert_vfork_catchpoint, t);
634 INHERIT (to_remove_vfork_catchpoint, t);
635 /* Do not inherit to_follow_fork. */
636 INHERIT (to_insert_exec_catchpoint, t);
637 INHERIT (to_remove_exec_catchpoint, t);
638 INHERIT (to_set_syscall_catchpoint, t);
639 INHERIT (to_has_exited, t);
640 /* Do not inherit to_mourn_inferior. */
641 INHERIT (to_can_run, t);
642 /* Do not inherit to_pass_signals. */
643 /* Do not inherit to_program_signals. */
644 /* Do not inherit to_thread_alive. */
645 /* Do not inherit to_find_new_threads. */
646 /* Do not inherit to_pid_to_str. */
647 INHERIT (to_extra_thread_info, t);
648 INHERIT (to_thread_name, t);
649 INHERIT (to_stop, t);
650 /* Do not inherit to_xfer_partial. */
651 INHERIT (to_rcmd, t);
652 INHERIT (to_pid_to_exec_file, t);
653 INHERIT (to_log_command, t);
654 INHERIT (to_stratum, t);
655 /* Do not inherit to_has_all_memory. */
656 /* Do not inherit to_has_memory. */
657 /* Do not inherit to_has_stack. */
658 /* Do not inherit to_has_registers. */
659 /* Do not inherit to_has_execution. */
660 INHERIT (to_has_thread_control, t);
661 /* Do not inherit to_can_async_p. */
662 /* Do not inherit to_is_async_p. */
663 /* Do not inherit to_async. */
664 INHERIT (to_find_memory_regions, t);
665 INHERIT (to_make_corefile_notes, t);
666 INHERIT (to_get_bookmark, t);
667 INHERIT (to_goto_bookmark, t);
668 /* Do not inherit to_get_thread_local_address. */
669 INHERIT (to_can_execute_reverse, t);
670 INHERIT (to_execution_direction, t);
671 INHERIT (to_thread_architecture, t);
672 /* Do not inherit to_read_description. */
673 INHERIT (to_get_ada_task_ptid, t);
674 /* Do not inherit to_search_memory. */
675 INHERIT (to_supports_multi_process, t);
676 INHERIT (to_supports_enable_disable_tracepoint, t);
677 INHERIT (to_supports_string_tracing, t);
678 INHERIT (to_trace_init, t);
679 INHERIT (to_download_tracepoint, t);
680 INHERIT (to_can_download_tracepoint, t);
681 INHERIT (to_download_trace_state_variable, t);
682 INHERIT (to_enable_tracepoint, t);
683 INHERIT (to_disable_tracepoint, t);
684 INHERIT (to_trace_set_readonly_regions, t);
685 INHERIT (to_trace_start, t);
686 INHERIT (to_get_trace_status, t);
687 INHERIT (to_get_tracepoint_status, t);
688 INHERIT (to_trace_stop, t);
689 INHERIT (to_trace_find, t);
690 INHERIT (to_get_trace_state_variable_value, t);
691 INHERIT (to_save_trace_data, t);
692 INHERIT (to_upload_tracepoints, t);
693 INHERIT (to_upload_trace_state_variables, t);
694 INHERIT (to_get_raw_trace_data, t);
695 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
696 INHERIT (to_set_disconnected_tracing, t);
697 INHERIT (to_set_circular_trace_buffer, t);
698 INHERIT (to_set_trace_buffer_size, t);
699 INHERIT (to_set_trace_notes, t);
700 INHERIT (to_get_tib_address, t);
701 INHERIT (to_set_permissions, t);
702 INHERIT (to_static_tracepoint_marker_at, t);
703 INHERIT (to_static_tracepoint_markers_by_strid, t);
704 INHERIT (to_traceframe_info, t);
705 INHERIT (to_use_agent, t);
706 INHERIT (to_can_use_agent, t);
707 INHERIT (to_augmented_libraries_svr4_read, t);
708 INHERIT (to_magic, t);
709 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
710 INHERIT (to_can_run_breakpoint_commands, t);
711 /* Do not inherit to_memory_map. */
712 /* Do not inherit to_flash_erase. */
713 /* Do not inherit to_flash_done. */
714 }
715 #undef INHERIT
716
717 /* Clean up a target struct so it no longer has any zero pointers in
718 it. Some entries are defaulted to a method that print an error,
719 others are hard-wired to a standard recursive default. */
720
721 #define de_fault(field, value) \
722 if (!current_target.field) \
723 current_target.field = value
724
725 de_fault (to_open,
726 (void (*) (char *, int))
727 tcomplain);
728 de_fault (to_close,
729 (void (*) (struct target_ops *))
730 target_ignore);
731 de_fault (to_post_attach,
732 (void (*) (struct target_ops *, int))
733 target_ignore);
734 de_fault (to_prepare_to_store,
735 (void (*) (struct target_ops *, struct regcache *))
736 noprocess);
737 de_fault (deprecated_xfer_memory,
738 (int (*) (CORE_ADDR, gdb_byte *, int, int,
739 struct mem_attrib *, struct target_ops *))
740 nomemory);
741 de_fault (to_files_info,
742 (void (*) (struct target_ops *))
743 target_ignore);
744 de_fault (to_can_use_hw_breakpoint,
745 (int (*) (struct target_ops *, int, int, int))
746 return_zero);
747 de_fault (to_insert_hw_breakpoint,
748 (int (*) (struct target_ops *, struct gdbarch *,
749 struct bp_target_info *))
750 return_minus_one);
751 de_fault (to_remove_hw_breakpoint,
752 (int (*) (struct target_ops *, struct gdbarch *,
753 struct bp_target_info *))
754 return_minus_one);
755 de_fault (to_insert_watchpoint,
756 (int (*) (struct target_ops *, CORE_ADDR, int, int,
757 struct expression *))
758 return_minus_one);
759 de_fault (to_remove_watchpoint,
760 (int (*) (struct target_ops *, CORE_ADDR, int, int,
761 struct expression *))
762 return_minus_one);
763 de_fault (to_watchpoint_addr_within_range,
764 default_watchpoint_addr_within_range);
765 de_fault (to_region_ok_for_hw_watchpoint,
766 default_region_ok_for_hw_watchpoint);
767 de_fault (to_can_accel_watchpoint_condition,
768 (int (*) (struct target_ops *, CORE_ADDR, int, int,
769 struct expression *))
770 return_zero);
771 de_fault (to_terminal_init,
772 (void (*) (struct target_ops *))
773 target_ignore);
774 de_fault (to_terminal_inferior,
775 (void (*) (struct target_ops *))
776 target_ignore);
777 de_fault (to_terminal_ours_for_output,
778 (void (*) (struct target_ops *))
779 target_ignore);
780 de_fault (to_terminal_ours,
781 (void (*) (struct target_ops *))
782 target_ignore);
783 de_fault (to_terminal_save_ours,
784 (void (*) (struct target_ops *))
785 target_ignore);
786 de_fault (to_terminal_info,
787 default_terminal_info);
788 de_fault (to_load,
789 (void (*) (struct target_ops *, char *, int))
790 tcomplain);
791 de_fault (to_post_startup_inferior,
792 (void (*) (struct target_ops *, ptid_t))
793 target_ignore);
794 de_fault (to_insert_fork_catchpoint,
795 (int (*) (struct target_ops *, int))
796 return_one);
797 de_fault (to_remove_fork_catchpoint,
798 (int (*) (struct target_ops *, int))
799 return_one);
800 de_fault (to_insert_vfork_catchpoint,
801 (int (*) (struct target_ops *, int))
802 return_one);
803 de_fault (to_remove_vfork_catchpoint,
804 (int (*) (struct target_ops *, int))
805 return_one);
806 de_fault (to_insert_exec_catchpoint,
807 (int (*) (struct target_ops *, int))
808 return_one);
809 de_fault (to_remove_exec_catchpoint,
810 (int (*) (struct target_ops *, int))
811 return_one);
812 de_fault (to_set_syscall_catchpoint,
813 (int (*) (struct target_ops *, int, int, int, int, int *))
814 return_one);
815 de_fault (to_has_exited,
816 (int (*) (struct target_ops *, int, int, int *))
817 return_zero);
818 de_fault (to_can_run,
819 (int (*) (struct target_ops *))
820 return_zero);
821 de_fault (to_extra_thread_info,
822 (char *(*) (struct target_ops *, struct thread_info *))
823 return_null);
824 de_fault (to_thread_name,
825 (char *(*) (struct target_ops *, struct thread_info *))
826 return_null);
827 de_fault (to_stop,
828 (void (*) (struct target_ops *, ptid_t))
829 target_ignore);
830 de_fault (to_rcmd,
831 (void (*) (struct target_ops *, char *, struct ui_file *))
832 tcomplain);
833 de_fault (to_pid_to_exec_file,
834 (char *(*) (struct target_ops *, int))
835 return_null);
836 de_fault (to_thread_architecture,
837 default_thread_architecture);
838 current_target.to_read_description = NULL;
839 de_fault (to_get_ada_task_ptid,
840 (ptid_t (*) (struct target_ops *, long, long))
841 default_get_ada_task_ptid);
842 de_fault (to_supports_multi_process,
843 (int (*) (struct target_ops *))
844 return_zero);
845 de_fault (to_supports_enable_disable_tracepoint,
846 (int (*) (struct target_ops *))
847 return_zero);
848 de_fault (to_supports_string_tracing,
849 (int (*) (struct target_ops *))
850 return_zero);
851 de_fault (to_trace_init,
852 (void (*) (struct target_ops *))
853 tcomplain);
854 de_fault (to_download_tracepoint,
855 (void (*) (struct target_ops *, struct bp_location *))
856 tcomplain);
857 de_fault (to_can_download_tracepoint,
858 (int (*) (struct target_ops *))
859 return_zero);
860 de_fault (to_download_trace_state_variable,
861 (void (*) (struct target_ops *, struct trace_state_variable *))
862 tcomplain);
863 de_fault (to_enable_tracepoint,
864 (void (*) (struct target_ops *, struct bp_location *))
865 tcomplain);
866 de_fault (to_disable_tracepoint,
867 (void (*) (struct target_ops *, struct bp_location *))
868 tcomplain);
869 de_fault (to_trace_set_readonly_regions,
870 (void (*) (struct target_ops *))
871 tcomplain);
872 de_fault (to_trace_start,
873 (void (*) (struct target_ops *))
874 tcomplain);
875 de_fault (to_get_trace_status,
876 (int (*) (struct target_ops *, struct trace_status *))
877 return_minus_one);
878 de_fault (to_get_tracepoint_status,
879 (void (*) (struct target_ops *, struct breakpoint *,
880 struct uploaded_tp *))
881 tcomplain);
882 de_fault (to_trace_stop,
883 (void (*) (struct target_ops *))
884 tcomplain);
885 de_fault (to_trace_find,
886 (int (*) (struct target_ops *,
887 enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
888 return_minus_one);
889 de_fault (to_get_trace_state_variable_value,
890 (int (*) (struct target_ops *, int, LONGEST *))
891 return_zero);
892 de_fault (to_save_trace_data,
893 (int (*) (struct target_ops *, const char *))
894 tcomplain);
895 de_fault (to_upload_tracepoints,
896 (int (*) (struct target_ops *, struct uploaded_tp **))
897 return_zero);
898 de_fault (to_upload_trace_state_variables,
899 (int (*) (struct target_ops *, struct uploaded_tsv **))
900 return_zero);
901 de_fault (to_get_raw_trace_data,
902 (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
903 tcomplain);
904 de_fault (to_get_min_fast_tracepoint_insn_len,
905 (int (*) (struct target_ops *))
906 return_minus_one);
907 de_fault (to_set_disconnected_tracing,
908 (void (*) (struct target_ops *, int))
909 target_ignore);
910 de_fault (to_set_circular_trace_buffer,
911 (void (*) (struct target_ops *, int))
912 target_ignore);
913 de_fault (to_set_trace_buffer_size,
914 (void (*) (struct target_ops *, LONGEST))
915 target_ignore);
916 de_fault (to_set_trace_notes,
917 (int (*) (struct target_ops *,
918 const char *, const char *, const char *))
919 return_zero);
920 de_fault (to_get_tib_address,
921 (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
922 tcomplain);
923 de_fault (to_set_permissions,
924 (void (*) (struct target_ops *))
925 target_ignore);
926 de_fault (to_static_tracepoint_marker_at,
927 (int (*) (struct target_ops *,
928 CORE_ADDR, struct static_tracepoint_marker *))
929 return_zero);
930 de_fault (to_static_tracepoint_markers_by_strid,
931 (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
932 const char *))
933 tcomplain);
934 de_fault (to_traceframe_info,
935 (struct traceframe_info * (*) (struct target_ops *))
936 return_null);
937 de_fault (to_supports_evaluation_of_breakpoint_conditions,
938 (int (*) (struct target_ops *))
939 return_zero);
940 de_fault (to_can_run_breakpoint_commands,
941 (int (*) (struct target_ops *))
942 return_zero);
943 de_fault (to_use_agent,
944 (int (*) (int))
945 tcomplain);
946 de_fault (to_can_use_agent,
947 (int (*) (void))
948 return_zero);
949 de_fault (to_augmented_libraries_svr4_read,
950 (int (*) (void))
951 return_zero);
952 de_fault (to_execution_direction, default_execution_direction);
953
954 #undef de_fault
955
956 /* Finally, position the target-stack beneath the squashed
957 "current_target". That way code looking for a non-inherited
958 target method can quickly and simply find it. */
959 current_target.beneath = target_stack;
960
961 if (targetdebug)
962 setup_target_debug ();
963 }
964
965 /* Push a new target type into the stack of the existing target accessors,
966 possibly superseding some of the existing accessors.
967
968 Rather than allow an empty stack, we always have the dummy target at
969 the bottom stratum, so we can call the function vectors without
970 checking them. */
971
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the first link
     whose stratum is not above T's.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target so dispatch reflects the new stack.  */
  update_current_target ();
}
1015
1016 /* Remove a target_ops vector from the stack, wherever it may be.
1017 Return how many times it was removed (0 or 1). */
1018
1019 int
1020 unpush_target (struct target_ops *t)
1021 {
1022 struct target_ops **cur;
1023 struct target_ops *tmp;
1024
1025 if (t->to_stratum == dummy_stratum)
1026 internal_error (__FILE__, __LINE__,
1027 _("Attempt to unpush the dummy target"));
1028
1029 /* Look for the specified target. Note that we assume that a target
1030 can only occur once in the target stack. */
1031
1032 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1033 {
1034 if ((*cur) == t)
1035 break;
1036 }
1037
1038 /* If we don't find target_ops, quit. Only open targets should be
1039 closed. */
1040 if ((*cur) == NULL)
1041 return 0;
1042
1043 /* Unchain the target. */
1044 tmp = (*cur);
1045 (*cur) = (*cur)->beneath;
1046 tmp->beneath = NULL;
1047
1048 update_current_target ();
1049
1050 /* Finally close the target. Note we do this after unchaining, so
1051 any target method calls from within the target_close
1052 implementation don't end up in T anymore. */
1053 target_close (t);
1054
1055 return 1;
1056 }
1057
1058 void
1059 pop_all_targets_above (enum strata above_stratum)
1060 {
1061 while ((int) (current_target.to_stratum) > (int) above_stratum)
1062 {
1063 if (!unpush_target (target_stack))
1064 {
1065 fprintf_unfiltered (gdb_stderr,
1066 "pop_all_targets couldn't find target %s\n",
1067 target_stack->to_shortname);
1068 internal_error (__FILE__, __LINE__,
1069 _("failed internal consistency check"));
1070 break;
1071 }
1072 }
1073 }
1074
1075 void
1076 pop_all_targets (void)
1077 {
1078 pop_all_targets_above (dummy_stratum);
1079 }
1080
1081 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1082
1083 int
1084 target_is_pushed (struct target_ops *t)
1085 {
1086 struct target_ops **cur;
1087
1088 /* Check magic number. If wrong, it probably means someone changed
1089 the struct definition, but not all the places that initialize one. */
1090 if (t->to_magic != OPS_MAGIC)
1091 {
1092 fprintf_unfiltered (gdb_stderr,
1093 "Magic number of %s target struct wrong\n",
1094 t->to_shortname);
1095 internal_error (__FILE__, __LINE__,
1096 _("failed internal consistency check"));
1097 }
1098
1099 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1100 if (*cur == t)
1101 return 1;
1102
1103 return 0;
1104 }
1105
1106 /* Using the objfile specified in OBJFILE, find the address for the
1107 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH and read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath current_target that knows how to
     translate TLS addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* Word the diagnostics differently depending on whether
	     OBJFILE is a shared library or the main executable.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for an outer
		 handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1202
1203 const char *
1204 target_xfer_status_to_string (enum target_xfer_status err)
1205 {
1206 #define CASE(X) case X: return #X
1207 switch (err)
1208 {
1209 CASE(TARGET_XFER_E_IO);
1210 CASE(TARGET_XFER_E_UNAVAILABLE);
1211 default:
1212 return "<unknown>";
1213 }
1214 #undef CASE
1215 };
1216
1217
1218 #undef MIN
1219 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1220
1221 /* target_read_string -- read a null terminated string, up to LEN bytes,
1222 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1223 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1224 is responsible for freeing it. Return the number of bytes successfully
1225 read. */
1226
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  /* Read in aligned 4-byte chunks, doubling the result buffer as
     needed, until LEN bytes have been read, a NUL is found, or a read
     fails.  */
  while (len > 0)
    {
      /* Bytes we can take from the aligned 4-byte window covering
	 MEMADDR; OFFSET is MEMADDR's position within that window.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy the chunk into the result, stopping at (and including)
	 a NUL terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* The caller owns BUFFER and is responsible for freeing it, even on
     error (ERRCODE reports the failure via *ERRNOP).  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1293
1294 struct target_section_table *
1295 target_get_section_table (struct target_ops *target)
1296 {
1297 struct target_ops *t;
1298
1299 if (targetdebug)
1300 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1301
1302 for (t = target; t != NULL; t = t->beneath)
1303 if (t->to_get_section_table != NULL)
1304 return (*t->to_get_section_table) (t);
1305
1306 return NULL;
1307 }
1308
1309 /* Find a section containing ADDR. */
1310
1311 struct target_section *
1312 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1313 {
1314 struct target_section_table *table = target_get_section_table (target);
1315 struct target_section *secp;
1316
1317 if (table == NULL)
1318 return NULL;
1319
1320 for (secp = table->sections; secp < table->sections_end; secp++)
1321 {
1322 if (addr >= secp->addr && addr < secp->endaddr)
1323 return secp;
1324 }
1325 return NULL;
1326 }
1327
1328 /* Read memory from the live target, even if currently inspecting a
1329 traceframe. The return is the same as that of target_read. */
1330
1331 static enum target_xfer_status
1332 target_read_live_memory (enum target_object object,
1333 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1334 ULONGEST *xfered_len)
1335 {
1336 enum target_xfer_status ret;
1337 struct cleanup *cleanup;
1338
1339 /* Switch momentarily out of tfind mode so to access live memory.
1340 Note that this must not clear global state, such as the frame
1341 cache, which must still remain valid for the previous traceframe.
1342 We may be _building_ the frame cache at this point. */
1343 cleanup = make_cleanup_restore_traceframe_number ();
1344 set_traceframe_number (-1);
1345
1346 ret = target_xfer_partial (current_target.beneath, object, NULL,
1347 myaddr, NULL, memaddr, len, xfered_len);
1348
1349 do_cleanups (cleanup);
1350 return ret;
1351 }
1352
1353 /* Using the set of read-only target sections of OPS, read live
1354 read-only memory. Note that the actual reads start from the
1355 top-most target again.
1356
1357 For interface/parameters/return description see target.h,
1358 to_xfer_partial. */
1359
static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls inside a section flagged
     SEC_READONLY; only then is live memory guaranteed to match what
     was current at trace time.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find how much of [MEMADDR, MEMEND) is covered by a single
	 section and read at most that much.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR is not in any known read-only section: nothing read.  */
  return TARGET_XFER_EOF;
}
1408
1409 /* Read memory from more than one valid target. A core file, for
1410 instance, could have some of memory but delegate other bits to
1411 the target below it. So, we must manually try all targets. */
1412
1413 static enum target_xfer_status
1414 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1415 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1416 ULONGEST *xfered_len)
1417 {
1418 enum target_xfer_status res;
1419
1420 do
1421 {
1422 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1423 readbuf, writebuf, memaddr, len,
1424 xfered_len);
1425 if (res == TARGET_XFER_OK)
1426 break;
1427
1428 /* Stop if the target reports that the memory is not available. */
1429 if (res == TARGET_XFER_E_UNAVAILABLE)
1430 break;
1431
1432 /* We want to continue past core files to executables, but not
1433 past a running target's memory. */
1434 if (ops->to_has_all_memory (ops))
1435 break;
1436
1437 ops = ops->beneath;
1438 }
1439 while (ops != NULL);
1440
1441 return res;
1442 }
1443
1444 /* Perform a partial memory transfer.
1445 For docs see target.h, to_xfer_partial. */
1446
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate the unmapped address to its mapped equivalent
	     and satisfy the read from the section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
         target does not support querying traceframe info, and so we
         attempt reading from the traceframe anyway (assuming the
         target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The traceframe does not cover the start of the request;
	     fall back to live read-only memory for the leading
	     portion.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  /* Use the dcache when the region (or the stack/code cache setting
     for this object) asks for it.  */
  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1654
1655 /* Perform a partial memory transfer. For docs see target.h,
1656 to_xfer_partial. */
1657
1658 static enum target_xfer_status
1659 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1660 gdb_byte *readbuf, const gdb_byte *writebuf,
1661 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1662 {
1663 enum target_xfer_status res;
1664
1665 /* Zero length requests are ok and require no work. */
1666 if (len == 0)
1667 return TARGET_XFER_EOF;
1668
1669 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1670 breakpoint insns, thus hiding out from higher layers whether
1671 there are software breakpoints inserted in the code stream. */
1672 if (readbuf != NULL)
1673 {
1674 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1675 xfered_len);
1676
1677 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1678 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1679 }
1680 else
1681 {
1682 void *buf;
1683 struct cleanup *old_chain;
1684
1685 /* A large write request is likely to be partially satisfied
1686 by memory_xfer_partial_1. We will continually malloc
1687 and free a copy of the entire write request for breakpoint
1688 shadow handling even though we only end up writing a small
1689 subset of it. Cap writes to 4KB to mitigate this. */
1690 len = min (4096, len);
1691
1692 buf = xmalloc (len);
1693 old_chain = make_cleanup (xfree, buf);
1694 memcpy (buf, writebuf, len);
1695
1696 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1697 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1698 xfered_len);
1699
1700 do_cleanups (old_chain);
1701 }
1702
1703 return res;
1704 }
1705
1706 static void
1707 restore_show_memory_breakpoints (void *arg)
1708 {
1709 show_memory_breakpoints = (uintptr_t) arg;
1710 }
1711
1712 struct cleanup *
1713 make_show_memory_breakpoints_cleanup (int show)
1714 {
1715 int current = show_memory_breakpoints;
1716
1717 show_memory_breakpoints = show;
1718 return make_cleanup (restore_show_memory_breakpoints,
1719 (void *) (uintptr_t) current);
1720 }
1721
1722 /* For docs see target.h, to_xfer_partial. */
1723
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" on, log the request, its result, and
     (verbosely) the bytes transferred.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the dump into 16-byte lines; unless debugging
		 verbosely (targetdebug >= 2), elide after the first
		 line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1814
1815 /* Read LEN bytes of target memory at address MEMADDR, placing the
1816 results in GDB's memory at MYADDR. Returns either 0 for success or
1817 TARGET_XFER_E_IO if any error occurs.
1818
1819 If an error occurs, no guarantee is made about the contents of the data at
1820 MYADDR. In particular, the caller should not depend upon partial reads
1821 filling the buffer with good data. There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
1823 deal with partial reads should call target_read (which will retry until
1824 it makes no progress, and then return how much was transferred). */
1825
1826 int
1827 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1828 {
1829 /* Dispatch to the topmost target, not the flattened current_target.
1830 Memory accesses check target->to_has_(all_)memory, and the
1831 flattened target doesn't inherit those. */
1832 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1833 myaddr, memaddr, len) == len)
1834 return 0;
1835 else
1836 return TARGET_XFER_E_IO;
1837 }
1838
1839 /* Like target_read_memory, but specify explicitly that this is a read
1840 from the target's raw memory. That is, this read bypasses the
1841 dcache, breakpoint shadowing, etc. */
1842
1843 int
1844 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1845 {
1846 /* See comment in target_read_memory about why the request starts at
1847 current_target.beneath. */
1848 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1849 myaddr, memaddr, len) == len)
1850 return 0;
1851 else
1852 return TARGET_XFER_E_IO;
1853 }
1854
1855 /* Like target_read_memory, but specify explicitly that this is a read from
1856 the target's stack. This may trigger different cache behavior. */
1857
1858 int
1859 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1860 {
1861 /* See comment in target_read_memory about why the request starts at
1862 current_target.beneath. */
1863 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1864 myaddr, memaddr, len) == len)
1865 return 0;
1866 else
1867 return TARGET_XFER_E_IO;
1868 }
1869
1870 /* Like target_read_memory, but specify explicitly that this is a read from
1871 the target's code. This may trigger different cache behavior. */
1872
1873 int
1874 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1875 {
1876 /* See comment in target_read_memory about why the request starts at
1877 current_target.beneath. */
1878 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1879 myaddr, memaddr, len) == len)
1880 return 0;
1881 else
1882 return TARGET_XFER_E_IO;
1883 }
1884
1885 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1886 Returns either 0 for success or TARGET_XFER_E_IO if any
1887 error occurs. If an error occurs, no guarantee is made about how
1888 much data got written. Callers that can deal with partial writes
1889 should call target_write. */
1890
1891 int
1892 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1893 {
1894 /* See comment in target_read_memory about why the request starts at
1895 current_target.beneath. */
1896 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1897 myaddr, memaddr, len) == len)
1898 return 0;
1899 else
1900 return TARGET_XFER_E_IO;
1901 }
1902
1903 /* Write LEN bytes from MYADDR to target raw memory at address
1904 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1905 if any error occurs. If an error occurs, no guarantee is made
1906 about how much data got written. Callers that can deal with
1907 partial writes should call target_write. */
1908
1909 int
1910 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1911 {
1912 /* See comment in target_read_memory about why the request starts at
1913 current_target.beneath. */
1914 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1915 myaddr, memaddr, len) == len)
1916 return 0;
1917 else
1918 return TARGET_XFER_E_IO;
1919 }
1920
1921 /* Fetch the target's memory map. */
1922
1923 VEC(mem_region_s) *
1924 target_memory_map (void)
1925 {
1926 VEC(mem_region_s) *result;
1927 struct mem_region *last_one, *this_one;
1928 int ix;
1929 struct target_ops *t;
1930
1931 if (targetdebug)
1932 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1933
1934 for (t = current_target.beneath; t != NULL; t = t->beneath)
1935 if (t->to_memory_map != NULL)
1936 break;
1937
1938 if (t == NULL)
1939 return NULL;
1940
1941 result = t->to_memory_map (t);
1942 if (result == NULL)
1943 return NULL;
1944
1945 qsort (VEC_address (mem_region_s, result),
1946 VEC_length (mem_region_s, result),
1947 sizeof (struct mem_region), mem_region_cmp);
1948
1949 /* Check that regions do not overlap. Simultaneously assign
1950 a numbering for the "mem" commands to use to refer to
1951 each region. */
1952 last_one = NULL;
1953 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1954 {
1955 this_one->number = ix;
1956
1957 if (last_one && last_one->hi > this_one->lo)
1958 {
1959 warning (_("Overlapping regions in memory map: ignoring"));
1960 VEC_free (mem_region_s, result);
1961 return NULL;
1962 }
1963 last_one = this_one;
1964 }
1965
1966 return result;
1967 }
1968
1969 void
1970 target_flash_erase (ULONGEST address, LONGEST length)
1971 {
1972 struct target_ops *t;
1973
1974 for (t = current_target.beneath; t != NULL; t = t->beneath)
1975 if (t->to_flash_erase != NULL)
1976 {
1977 if (targetdebug)
1978 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1979 hex_string (address), phex (length, 0));
1980 t->to_flash_erase (t, address, length);
1981 return;
1982 }
1983
1984 tcomplain ();
1985 }
1986
1987 void
1988 target_flash_done (void)
1989 {
1990 struct target_ops *t;
1991
1992 for (t = current_target.beneath; t != NULL; t = t->beneath)
1993 if (t->to_flash_done != NULL)
1994 {
1995 if (targetdebug)
1996 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1997 t->to_flash_done (t);
1998 return;
1999 }
2000
2001 tcomplain ();
2002 }
2003
/* "show" callback for the "trust-readonly-sections" setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2012
2013 /* More generic transfers. */
2014
2015 static enum target_xfer_status
2016 default_xfer_partial (struct target_ops *ops, enum target_object object,
2017 const char *annex, gdb_byte *readbuf,
2018 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2019 ULONGEST *xfered_len)
2020 {
2021 if (object == TARGET_OBJECT_MEMORY
2022 && ops->deprecated_xfer_memory != NULL)
2023 /* If available, fall back to the target's
2024 "deprecated_xfer_memory" method. */
2025 {
2026 int xfered = -1;
2027
2028 errno = 0;
2029 if (writebuf != NULL)
2030 {
2031 void *buffer = xmalloc (len);
2032 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2033
2034 memcpy (buffer, writebuf, len);
2035 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2036 1/*write*/, NULL, ops);
2037 do_cleanups (cleanup);
2038 }
2039 if (readbuf != NULL)
2040 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2041 0/*read*/, NULL, ops);
2042 if (xfered > 0)
2043 {
2044 *xfered_len = (ULONGEST) xfered;
2045 return TARGET_XFER_E_IO;
2046 }
2047 else if (xfered == 0 && errno == 0)
2048 /* "deprecated_xfer_memory" uses 0, cross checked against
2049 ERRNO as one indication of an error. */
2050 return TARGET_XFER_EOF;
2051 else
2052 return TARGET_XFER_E_IO;
2053 }
2054 else
2055 {
2056 gdb_assert (ops->beneath != NULL);
2057 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2058 readbuf, writebuf, offset, len,
2059 xfered_len);
2060 }
2061 }
2062
2063 /* Target vector read/write partial wrapper functions. */
2064
2065 static enum target_xfer_status
2066 target_read_partial (struct target_ops *ops,
2067 enum target_object object,
2068 const char *annex, gdb_byte *buf,
2069 ULONGEST offset, ULONGEST len,
2070 ULONGEST *xfered_len)
2071 {
2072 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
2073 xfered_len);
2074 }
2075
2076 static enum target_xfer_status
2077 target_write_partial (struct target_ops *ops,
2078 enum target_object object,
2079 const char *annex, const gdb_byte *buf,
2080 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2081 {
2082 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2083 xfered_len);
2084 }
2085
2086 /* Wrappers to perform the full transfer. */
2087
2088 /* For docs on target_read see target.h. */
2089
2090 LONGEST
2091 target_read (struct target_ops *ops,
2092 enum target_object object,
2093 const char *annex, gdb_byte *buf,
2094 ULONGEST offset, LONGEST len)
2095 {
2096 LONGEST xfered = 0;
2097
2098 while (xfered < len)
2099 {
2100 ULONGEST xfered_len;
2101 enum target_xfer_status status;
2102
2103 status = target_read_partial (ops, object, annex,
2104 (gdb_byte *) buf + xfered,
2105 offset + xfered, len - xfered,
2106 &xfered_len);
2107
2108 /* Call an observer, notifying them of the xfer progress? */
2109 if (status == TARGET_XFER_EOF)
2110 return xfered;
2111 else if (status == TARGET_XFER_OK)
2112 {
2113 xfered += xfered_len;
2114 QUIT;
2115 }
2116 else
2117 return -1;
2118
2119 }
2120 return len;
2121 }
2122
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that are neither at the beginning nor the end -- nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */
2143
static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  /* Scratch buffer covering the whole [begin, end) range; ownership
     is either transferred to the result block or freed below.  */
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of an accessible region.  FORWARD records which
     end was readable and thus which direction we bisect from.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end-begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; give up.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the part
         already known readable, in the direction we are scanning.  */
      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the next
             iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF ownership
         passes to the result block.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just the
         read tail into its own buffer so the result owns exactly the
         bytes that were read.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2257
2258 void
2259 free_memory_read_result_vector (void *x)
2260 {
2261 VEC(memory_read_result_s) *v = x;
2262 memory_read_result_s *current;
2263 int ix;
2264
2265 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2266 {
2267 xfree (current->data);
2268 }
2269 VEC_free (memory_read_result_s, v);
2270 }
2271
/* Read LEN bytes of target memory at OFFSET via OPS, collecting the
   readable pieces into a vector of memory_read_result blocks.
   Regions marked unreadable in the memory map are skipped; chunks
   that fail to read wholesale are salvaged piecewise by
   read_whatever_is_readable.  The caller owns the returned vector
   and each block's data buffer (see free_memory_read_result_vector).  */
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
        rlen = len - xfered;
      else
        /* NOTE(review): this looks like it should be
           region->hi - (offset + xfered); as written RLEN can extend
           past the region end once XFERED > 0 -- verify.  */
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              /* Full or partial success; record the block.  BUFFER
                 ownership passes to the result vector.  */
              struct memory_read_result r;
              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
2330
2331
2332 /* An alternative to target_write with progress callbacks. */
2333
2334 LONGEST
2335 target_write_with_progress (struct target_ops *ops,
2336 enum target_object object,
2337 const char *annex, const gdb_byte *buf,
2338 ULONGEST offset, LONGEST len,
2339 void (*progress) (ULONGEST, void *), void *baton)
2340 {
2341 LONGEST xfered = 0;
2342
2343 /* Give the progress callback a chance to set up. */
2344 if (progress)
2345 (*progress) (0, baton);
2346
2347 while (xfered < len)
2348 {
2349 ULONGEST xfered_len;
2350 enum target_xfer_status status;
2351
2352 status = target_write_partial (ops, object, annex,
2353 (gdb_byte *) buf + xfered,
2354 offset + xfered, len - xfered,
2355 &xfered_len);
2356
2357 if (status == TARGET_XFER_EOF)
2358 return xfered;
2359 if (TARGET_XFER_STATUS_ERROR_P (status))
2360 return -1;
2361
2362 gdb_assert (status == TARGET_XFER_OK);
2363 if (progress)
2364 (*progress) (xfered_len, baton);
2365
2366 xfered += xfered_len;
2367 QUIT;
2368 }
2369 return len;
2370 }
2371
2372 /* For docs on target_write see target.h. */
2373
2374 LONGEST
2375 target_write (struct target_ops *ops,
2376 enum target_object object,
2377 const char *annex, const gdb_byte *buf,
2378 ULONGEST offset, LONGEST len)
2379 {
2380 return target_write_with_progress (ops, object, annex, buf, offset, len,
2381 NULL, NULL);
2382 }
2383
2384 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2385 the size of the transferred data. PADDING additional bytes are
2386 available in *BUF_P. This is a helper function for
2387 target_read_alloc; see the declaration of that function for more
2388 information. */
2389
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Ask for the next chunk, keeping PADDING bytes of headroom at
         the end of the buffer for the caller (e.g. the NUL terminator
         appended by target_read_stralloc).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  Hand the buffer to the caller only
             if something was actually read; otherwise free it and
             leave *BUF_P untouched.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}
2446
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */
2450
2451 LONGEST
2452 target_read_alloc (struct target_ops *ops, enum target_object object,
2453 const char *annex, gdb_byte **buf_p)
2454 {
2455 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2456 }
2457
2458 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2459 returned as a string, allocated using xmalloc. If an error occurs
2460 or the transfer is unsupported, NULL is returned. Empty objects
2461 are returned as allocated but empty strings. A warning is issued
2462 if the result contains any embedded NUL bytes. */
2463
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Ask for one byte of padding so a NUL terminator can safely be
     written at BUFSTR[TRANSFERRED] below.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* Empty object: BUFFER was never handed to us, so return a fresh
     empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target object %d, annex %s, "
                   "contained unexpected null characters"),
                 (int) object, annex ? annex : "(none)");
        break;
      }

  return bufstr;
}
2495
2496 /* Memory transfer methods. */
2497
2498 void
2499 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2500 LONGEST len)
2501 {
2502 /* This method is used to read from an alternate, non-current
2503 target. This read must bypass the overlay support (as symbols
2504 don't match this target), and GDB's internal cache (wrong cache
2505 for this target). */
2506 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2507 != len)
2508 memory_error (TARGET_XFER_E_IO, addr);
2509 }
2510
2511 ULONGEST
2512 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2513 int len, enum bfd_endian byte_order)
2514 {
2515 gdb_byte buf[sizeof (ULONGEST)];
2516
2517 gdb_assert (len <= sizeof (buf));
2518 get_target_memory (ops, addr, buf, len);
2519 return extract_unsigned_integer (buf, len, byte_order);
2520 }
2521
2522 /* See target.h. */
2523
2524 int
2525 target_insert_breakpoint (struct gdbarch *gdbarch,
2526 struct bp_target_info *bp_tgt)
2527 {
2528 if (!may_insert_breakpoints)
2529 {
2530 warning (_("May not insert breakpoints"));
2531 return 1;
2532 }
2533
2534 return current_target.to_insert_breakpoint (&current_target,
2535 gdbarch, bp_tgt);
2536 }
2537
2538 /* See target.h. */
2539
2540 int
2541 target_remove_breakpoint (struct gdbarch *gdbarch,
2542 struct bp_target_info *bp_tgt)
2543 {
2544 /* This is kind of a weird case to handle, but the permission might
2545 have been changed after breakpoints were inserted - in which case
2546 we should just take the user literally and assume that any
2547 breakpoints should be left in place. */
2548 if (!may_insert_breakpoints)
2549 {
2550 warning (_("May not remove breakpoints"));
2551 return 1;
2552 }
2553
2554 return current_target.to_remove_breakpoint (&current_target,
2555 gdbarch, bp_tgt);
2556 }
2557
2558 static void
2559 target_info (char *args, int from_tty)
2560 {
2561 struct target_ops *t;
2562 int has_all_mem = 0;
2563
2564 if (symfile_objfile != NULL)
2565 printf_unfiltered (_("Symbols from \"%s\".\n"),
2566 objfile_name (symfile_objfile));
2567
2568 for (t = target_stack; t != NULL; t = t->beneath)
2569 {
2570 if (!(*t->to_has_memory) (t))
2571 continue;
2572
2573 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2574 continue;
2575 if (has_all_mem)
2576 printf_unfiltered (_("\tWhile running this, "
2577 "GDB does not access memory from...\n"));
2578 printf_unfiltered ("%s:\n", t->to_longname);
2579 (t->to_files_info) (t);
2580 has_all_mem = (*t->to_has_all_memory) (t);
2581 }
2582 }
2583
2584 /* This function is called before any new inferior is created, e.g.
2585 by running a program, attaching, or connecting to a target.
2586 It cleans up any state from previous invocations which might
2587 change between runs. This is a subset of what target_preopen
2588 resets (things which might change between targets). */
2589
2590 void
2591 target_pre_inferior (int from_tty)
2592 {
2593 /* Clear out solib state. Otherwise the solib state of the previous
2594 inferior might have survived and is entirely wrong for the new
2595 target. This has been observed on GNU/Linux using glibc 2.3. How
2596 to reproduce:
2597
2598 bash$ ./foo&
2599 [1] 4711
2600 bash$ ./foo&
2601 [1] 4712
2602 bash$ gdb ./foo
2603 [...]
2604 (gdb) attach 4711
2605 (gdb) detach
2606 (gdb) attach 4712
2607 Cannot access memory at address 0xdeadbeef
2608 */
2609
2610 /* In some OSs, the shared library list is the same/global/shared
2611 across inferiors. If code is shared between processes, so are
2612 memory regions and features. */
2613 if (!gdbarch_has_global_solist (target_gdbarch ()))
2614 {
2615 no_shared_libraries (NULL, from_tty);
2616
2617 invalidate_target_mem_regions ();
2618
2619 target_clear_description ();
2620 }
2621
2622 agent_capability_invalidate ();
2623 }
2624
2625 /* Callback for iterate_over_inferiors. Gets rid of the given
2626 inferior. */
2627
2628 static int
2629 dispose_inferior (struct inferior *inf, void *args)
2630 {
2631 struct thread_info *thread;
2632
2633 thread = any_thread_of_process (inf->pid);
2634 if (thread)
2635 {
2636 switch_to_thread (thread->ptid);
2637
2638 /* Core inferiors actually should be detached, not killed. */
2639 if (target_has_execution)
2640 target_kill ();
2641 else
2642 target_detach (NULL, 0);
2643 }
2644
2645 return 0;
2646 }
2647
2648 /* This is to be called by the open routine before it does
2649 anything. */
2650
2651 void
2652 target_preopen (int from_tty)
2653 {
2654 dont_repeat ();
2655
2656 if (have_inferiors ())
2657 {
2658 if (!from_tty
2659 || !have_live_inferiors ()
2660 || query (_("A program is being debugged already. Kill it? ")))
2661 iterate_over_inferiors (dispose_inferior, NULL);
2662 else
2663 error (_("Program not killed."));
2664 }
2665
2666 /* Calling target_kill may remove the target from the stack. But if
2667 it doesn't (which seems like a win for UDI), remove it now. */
2668 /* Leave the exec target, though. The user may be switching from a
2669 live process to a core of the same program. */
2670 pop_all_targets_above (file_stratum);
2671
2672 target_pre_inferior (from_tty);
2673 }
2674
2675 /* Detach a target after doing deferred register stores. */
2676
2677 void
2678 target_detach (const char *args, int from_tty)
2679 {
2680 struct target_ops* t;
2681
2682 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2683 /* Don't remove global breakpoints here. They're removed on
2684 disconnection from the target. */
2685 ;
2686 else
2687 /* If we're in breakpoints-always-inserted mode, have to remove
2688 them before detaching. */
2689 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2690
2691 prepare_for_detach ();
2692
2693 for (t = current_target.beneath; t != NULL; t = t->beneath)
2694 {
2695 if (t->to_detach != NULL)
2696 {
2697 t->to_detach (t, args, from_tty);
2698 if (targetdebug)
2699 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2700 args, from_tty);
2701 return;
2702 }
2703 }
2704
2705 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2706 }
2707
2708 void
2709 target_disconnect (char *args, int from_tty)
2710 {
2711 struct target_ops *t;
2712
2713 /* If we're in breakpoints-always-inserted mode or if breakpoints
2714 are global across processes, we have to remove them before
2715 disconnecting. */
2716 remove_breakpoints ();
2717
2718 for (t = current_target.beneath; t != NULL; t = t->beneath)
2719 if (t->to_disconnect != NULL)
2720 {
2721 if (targetdebug)
2722 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2723 args, from_tty);
2724 t->to_disconnect (t, args, from_tty);
2725 return;
2726 }
2727
2728 tcomplain ();
2729 }
2730
2731 ptid_t
2732 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2733 {
2734 struct target_ops *t;
2735 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2736 status, options);
2737
2738 if (targetdebug)
2739 {
2740 char *status_string;
2741 char *options_string;
2742
2743 status_string = target_waitstatus_to_string (status);
2744 options_string = target_options_to_string (options);
2745 fprintf_unfiltered (gdb_stdlog,
2746 "target_wait (%d, status, options={%s})"
2747 " = %d, %s\n",
2748 ptid_get_pid (ptid), options_string,
2749 ptid_get_pid (retval), status_string);
2750 xfree (status_string);
2751 xfree (options_string);
2752 }
2753
2754 return retval;
2755 }
2756
2757 char *
2758 target_pid_to_str (ptid_t ptid)
2759 {
2760 struct target_ops *t;
2761
2762 for (t = current_target.beneath; t != NULL; t = t->beneath)
2763 {
2764 if (t->to_pid_to_str != NULL)
2765 return (*t->to_pid_to_str) (t, ptid);
2766 }
2767
2768 return normal_pid_to_str (ptid);
2769 }
2770
2771 char *
2772 target_thread_name (struct thread_info *info)
2773 {
2774 struct target_ops *t;
2775
2776 for (t = current_target.beneath; t != NULL; t = t->beneath)
2777 {
2778 if (t->to_thread_name != NULL)
2779 return (*t->to_thread_name) (t, info);
2780 }
2781
2782 return NULL;
2783 }
2784
2785 void
2786 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2787 {
2788 struct target_ops *t;
2789
2790 target_dcache_invalidate ();
2791
2792 current_target.to_resume (&current_target, ptid, step, signal);
2793 if (targetdebug)
2794 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2795 ptid_get_pid (ptid),
2796 step ? "step" : "continue",
2797 gdb_signal_to_name (signal));
2798
2799 registers_changed_ptid (ptid);
2800 set_executing (ptid, 1);
2801 set_running (ptid, 1);
2802 clear_inline_frame_state (ptid);
2803 }
2804
2805 void
2806 target_pass_signals (int numsigs, unsigned char *pass_signals)
2807 {
2808 struct target_ops *t;
2809
2810 for (t = current_target.beneath; t != NULL; t = t->beneath)
2811 {
2812 if (t->to_pass_signals != NULL)
2813 {
2814 if (targetdebug)
2815 {
2816 int i;
2817
2818 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2819 numsigs);
2820
2821 for (i = 0; i < numsigs; i++)
2822 if (pass_signals[i])
2823 fprintf_unfiltered (gdb_stdlog, " %s",
2824 gdb_signal_to_name (i));
2825
2826 fprintf_unfiltered (gdb_stdlog, " })\n");
2827 }
2828
2829 (*t->to_pass_signals) (t, numsigs, pass_signals);
2830 return;
2831 }
2832 }
2833 }
2834
2835 void
2836 target_program_signals (int numsigs, unsigned char *program_signals)
2837 {
2838 struct target_ops *t;
2839
2840 for (t = current_target.beneath; t != NULL; t = t->beneath)
2841 {
2842 if (t->to_program_signals != NULL)
2843 {
2844 if (targetdebug)
2845 {
2846 int i;
2847
2848 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2849 numsigs);
2850
2851 for (i = 0; i < numsigs; i++)
2852 if (program_signals[i])
2853 fprintf_unfiltered (gdb_stdlog, " %s",
2854 gdb_signal_to_name (i));
2855
2856 fprintf_unfiltered (gdb_stdlog, " })\n");
2857 }
2858
2859 (*t->to_program_signals) (t, numsigs, program_signals);
2860 return;
2861 }
2862 }
2863 }
2864
2865 /* Look through the list of possible targets for a target that can
2866 follow forks. */
2867
2868 int
2869 target_follow_fork (int follow_child, int detach_fork)
2870 {
2871 struct target_ops *t;
2872
2873 for (t = current_target.beneath; t != NULL; t = t->beneath)
2874 {
2875 if (t->to_follow_fork != NULL)
2876 {
2877 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2878
2879 if (targetdebug)
2880 fprintf_unfiltered (gdb_stdlog,
2881 "target_follow_fork (%d, %d) = %d\n",
2882 follow_child, detach_fork, retval);
2883 return retval;
2884 }
2885 }
2886
2887 /* Some target returned a fork event, but did not know how to follow it. */
2888 internal_error (__FILE__, __LINE__,
2889 _("could not find a target to follow fork"));
2890 }
2891
2892 void
2893 target_mourn_inferior (void)
2894 {
2895 struct target_ops *t;
2896
2897 for (t = current_target.beneath; t != NULL; t = t->beneath)
2898 {
2899 if (t->to_mourn_inferior != NULL)
2900 {
2901 t->to_mourn_inferior (t);
2902 if (targetdebug)
2903 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2904
2905 /* We no longer need to keep handles on any of the object files.
2906 Make sure to release them to avoid unnecessarily locking any
2907 of them while we're not actually debugging. */
2908 bfd_cache_close_all ();
2909
2910 return;
2911 }
2912 }
2913
2914 internal_error (__FILE__, __LINE__,
2915 _("could not find a target to follow mourn inferior"));
2916 }
2917
2918 /* Look for a target which can describe architectural features, starting
2919 from TARGET. If we find one, return its description. */
2920
2921 const struct target_desc *
2922 target_read_description (struct target_ops *target)
2923 {
2924 struct target_ops *t;
2925
2926 for (t = target; t != NULL; t = t->beneath)
2927 if (t->to_read_description != NULL)
2928 {
2929 const struct target_desc *tdesc;
2930
2931 tdesc = t->to_read_description (t);
2932 if (tdesc)
2933 return tdesc;
2934 }
2935
2936 return NULL;
2937 }
2938
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on the
   target side with, for example, gdbserver).  */
2943
int
simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  Sized to one
     chunk plus (pattern_len - 1) bytes of overlap, so a match that
     straddles a chunk boundary is still found.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so an over-large request is reported
     as an error rather than aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
                 "memory at %s, halting search."),
               pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
        {
          /* Translate the buffer offset back to a target address.  */
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);
          return 1;
        }

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
        {
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
          int nr_to_read;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
            {
              warning (_("Unable to access %s bytes of target "
                         "memory at %s, halting search."),
                       plongest (nr_to_read),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);
              return -1;
            }

          start_addr += chunk_size;
        }
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3046
3047 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3048 sequence of bytes in PATTERN with length PATTERN_LEN.
3049
3050 The result is 1 if found, 0 if not found, and -1 if there was an error
3051 requiring halting of the search (e.g. memory read error).
3052 If the pattern is found the address is recorded in FOUND_ADDRP. */
3053
3054 int
3055 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3056 const gdb_byte *pattern, ULONGEST pattern_len,
3057 CORE_ADDR *found_addrp)
3058 {
3059 struct target_ops *t;
3060 int found;
3061
3062 /* We don't use INHERIT to set current_target.to_search_memory,
3063 so we have to scan the target stack and handle targetdebug
3064 ourselves. */
3065
3066 if (targetdebug)
3067 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3068 hex_string (start_addr));
3069
3070 for (t = current_target.beneath; t != NULL; t = t->beneath)
3071 if (t->to_search_memory != NULL)
3072 break;
3073
3074 if (t != NULL)
3075 {
3076 found = t->to_search_memory (t, start_addr, search_space_len,
3077 pattern, pattern_len, found_addrp);
3078 }
3079 else
3080 {
3081 /* If a special version of to_search_memory isn't available, use the
3082 simple version. */
3083 found = simple_search_memory (current_target.beneath,
3084 start_addr, search_space_len,
3085 pattern, pattern_len, found_addrp);
3086 }
3087
3088 if (targetdebug)
3089 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3090
3091 return found;
3092 }
3093
3094 /* Look through the currently pushed targets. If none of them will
3095 be able to restart the currently running process, issue an error
3096 message. */
3097
3098 void
3099 target_require_runnable (void)
3100 {
3101 struct target_ops *t;
3102
3103 for (t = target_stack; t != NULL; t = t->beneath)
3104 {
3105 /* If this target knows how to create a new program, then
3106 assume we will still be able to after killing the current
3107 one. Either killing and mourning will not pop T, or else
3108 find_default_run_target will find it again. */
3109 if (t->to_create_inferior != NULL)
3110 return;
3111
3112 /* Do not worry about thread_stratum targets that can not
3113 create inferiors. Assume they will be pushed again if
3114 necessary, and continue to the process_stratum. */
3115 if (t->to_stratum == thread_stratum
3116 || t->to_stratum == arch_stratum)
3117 continue;
3118
3119 error (_("The \"%s\" target does not support \"run\". "
3120 "Try \"help target\" or \"continue\"."),
3121 t->to_shortname);
3122 }
3123
3124 /* This function is only called if the target is running. In that
3125 case there should have been a process_stratum target and it
3126 should either know how to create inferiors, or not... */
3127 internal_error (__FILE__, __LINE__, _("No targets found"));
3128 }
3129
3130 /* Look through the list of possible targets for a target that can
3131 execute a run or attach command without any other data. This is
3132 used to locate the default process stratum.
3133
3134 If DO_MESG is not NULL, the result is always valid (error() is
3135 called for errors); else, return NULL on error. */
3136
3137 static struct target_ops *
3138 find_default_run_target (char *do_mesg)
3139 {
3140 struct target_ops **t;
3141 struct target_ops *runable = NULL;
3142 int count;
3143
3144 count = 0;
3145
3146 for (t = target_structs; t < target_structs + target_struct_size;
3147 ++t)
3148 {
3149 if ((*t)->to_can_run && target_can_run (*t))
3150 {
3151 runable = *t;
3152 ++count;
3153 }
3154 }
3155
3156 if (count != 1)
3157 {
3158 if (do_mesg)
3159 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3160 else
3161 return NULL;
3162 }
3163
3164 return runable;
3165 }
3166
3167 void
3168 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3169 {
3170 struct target_ops *t;
3171
3172 t = find_default_run_target ("attach");
3173 (t->to_attach) (t, args, from_tty);
3174 return;
3175 }
3176
3177 void
3178 find_default_create_inferior (struct target_ops *ops,
3179 char *exec_file, char *allargs, char **env,
3180 int from_tty)
3181 {
3182 struct target_ops *t;
3183
3184 t = find_default_run_target ("run");
3185 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3186 return;
3187 }
3188
3189 static int
3190 find_default_can_async_p (struct target_ops *ignore)
3191 {
3192 struct target_ops *t;
3193
3194 /* This may be called before the target is pushed on the stack;
3195 look for the default process stratum. If there's none, gdb isn't
3196 configured with a native debugger, and target remote isn't
3197 connected yet. */
3198 t = find_default_run_target (NULL);
3199 if (t && t->to_can_async_p != delegate_can_async_p)
3200 return (t->to_can_async_p) (t);
3201 return 0;
3202 }
3203
3204 static int
3205 find_default_is_async_p (struct target_ops *ignore)
3206 {
3207 struct target_ops *t;
3208
3209 /* This may be called before the target is pushed on the stack;
3210 look for the default process stratum. If there's none, gdb isn't
3211 configured with a native debugger, and target remote isn't
3212 connected yet. */
3213 t = find_default_run_target (NULL);
3214 if (t && t->to_is_async_p != delegate_is_async_p)
3215 return (t->to_is_async_p) (t);
3216 return 0;
3217 }
3218
3219 static int
3220 find_default_supports_non_stop (struct target_ops *self)
3221 {
3222 struct target_ops *t;
3223
3224 t = find_default_run_target (NULL);
3225 if (t && t->to_supports_non_stop)
3226 return (t->to_supports_non_stop) (t);
3227 return 0;
3228 }
3229
3230 int
3231 target_supports_non_stop (void)
3232 {
3233 struct target_ops *t;
3234
3235 for (t = &current_target; t != NULL; t = t->beneath)
3236 if (t->to_supports_non_stop)
3237 return t->to_supports_non_stop (t);
3238
3239 return 0;
3240 }
3241
3242 /* Implement the "info proc" command. */
3243
3244 int
3245 target_info_proc (char *args, enum info_proc_what what)
3246 {
3247 struct target_ops *t;
3248
3249 /* If we're already connected to something that can get us OS
3250 related data, use it. Otherwise, try using the native
3251 target. */
3252 if (current_target.to_stratum >= process_stratum)
3253 t = current_target.beneath;
3254 else
3255 t = find_default_run_target (NULL);
3256
3257 for (; t != NULL; t = t->beneath)
3258 {
3259 if (t->to_info_proc != NULL)
3260 {
3261 t->to_info_proc (t, args, what);
3262
3263 if (targetdebug)
3264 fprintf_unfiltered (gdb_stdlog,
3265 "target_info_proc (\"%s\", %d)\n", args, what);
3266
3267 return 1;
3268 }
3269 }
3270
3271 return 0;
3272 }
3273
3274 static int
3275 find_default_supports_disable_randomization (struct target_ops *self)
3276 {
3277 struct target_ops *t;
3278
3279 t = find_default_run_target (NULL);
3280 if (t && t->to_supports_disable_randomization)
3281 return (t->to_supports_disable_randomization) (t);
3282 return 0;
3283 }
3284
3285 int
3286 target_supports_disable_randomization (void)
3287 {
3288 struct target_ops *t;
3289
3290 for (t = &current_target; t != NULL; t = t->beneath)
3291 if (t->to_supports_disable_randomization)
3292 return t->to_supports_disable_randomization (t);
3293
3294 return 0;
3295 }
3296
3297 char *
3298 target_get_osdata (const char *type)
3299 {
3300 struct target_ops *t;
3301
3302 /* If we're already connected to something that can get us OS
3303 related data, use it. Otherwise, try using the native
3304 target. */
3305 if (current_target.to_stratum >= process_stratum)
3306 t = current_target.beneath;
3307 else
3308 t = find_default_run_target ("get OS data");
3309
3310 if (!t)
3311 return NULL;
3312
3313 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3314 }
3315
3316 /* Determine the current address space of thread PTID. */
3317
3318 struct address_space *
3319 target_thread_address_space (ptid_t ptid)
3320 {
3321 struct address_space *aspace;
3322 struct inferior *inf;
3323 struct target_ops *t;
3324
3325 for (t = current_target.beneath; t != NULL; t = t->beneath)
3326 {
3327 if (t->to_thread_address_space != NULL)
3328 {
3329 aspace = t->to_thread_address_space (t, ptid);
3330 gdb_assert (aspace);
3331
3332 if (targetdebug)
3333 fprintf_unfiltered (gdb_stdlog,
3334 "target_thread_address_space (%s) = %d\n",
3335 target_pid_to_str (ptid),
3336 address_space_num (aspace));
3337 return aspace;
3338 }
3339 }
3340
3341 /* Fall-back to the "main" address space of the inferior. */
3342 inf = find_inferior_pid (ptid_get_pid (ptid));
3343
3344 if (inf == NULL || inf->aspace == NULL)
3345 internal_error (__FILE__, __LINE__,
3346 _("Can't determine the current "
3347 "address space of thread %s\n"),
3348 target_pid_to_str (ptid));
3349
3350 return inf->aspace;
3351 }
3352
3353
3354 /* Target file operations. */
3355
3356 static struct target_ops *
3357 default_fileio_target (void)
3358 {
3359 /* If we're already connected to something that can perform
3360 file I/O, use it. Otherwise, try using the native target. */
3361 if (current_target.to_stratum >= process_stratum)
3362 return current_target.beneath;
3363 else
3364 return find_default_run_target ("file I/O");
3365 }
3366
3367 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3368 target file descriptor, or -1 if an error occurs (and set
3369 *TARGET_ERRNO). */
3370 int
3371 target_fileio_open (const char *filename, int flags, int mode,
3372 int *target_errno)
3373 {
3374 struct target_ops *t;
3375
3376 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3377 {
3378 if (t->to_fileio_open != NULL)
3379 {
3380 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3381
3382 if (targetdebug)
3383 fprintf_unfiltered (gdb_stdlog,
3384 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3385 filename, flags, mode,
3386 fd, fd != -1 ? 0 : *target_errno);
3387 return fd;
3388 }
3389 }
3390
3391 *target_errno = FILEIO_ENOSYS;
3392 return -1;
3393 }
3394
3395 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3396 Return the number of bytes written, or -1 if an error occurs
3397 (and set *TARGET_ERRNO). */
3398 int
3399 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3400 ULONGEST offset, int *target_errno)
3401 {
3402 struct target_ops *t;
3403
3404 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3405 {
3406 if (t->to_fileio_pwrite != NULL)
3407 {
3408 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3409 target_errno);
3410
3411 if (targetdebug)
3412 fprintf_unfiltered (gdb_stdlog,
3413 "target_fileio_pwrite (%d,...,%d,%s) "
3414 "= %d (%d)\n",
3415 fd, len, pulongest (offset),
3416 ret, ret != -1 ? 0 : *target_errno);
3417 return ret;
3418 }
3419 }
3420
3421 *target_errno = FILEIO_ENOSYS;
3422 return -1;
3423 }
3424
3425 /* Read up to LEN bytes FD on the target into READ_BUF.
3426 Return the number of bytes read, or -1 if an error occurs
3427 (and set *TARGET_ERRNO). */
3428 int
3429 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3430 ULONGEST offset, int *target_errno)
3431 {
3432 struct target_ops *t;
3433
3434 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3435 {
3436 if (t->to_fileio_pread != NULL)
3437 {
3438 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3439 target_errno);
3440
3441 if (targetdebug)
3442 fprintf_unfiltered (gdb_stdlog,
3443 "target_fileio_pread (%d,...,%d,%s) "
3444 "= %d (%d)\n",
3445 fd, len, pulongest (offset),
3446 ret, ret != -1 ? 0 : *target_errno);
3447 return ret;
3448 }
3449 }
3450
3451 *target_errno = FILEIO_ENOSYS;
3452 return -1;
3453 }
3454
3455 /* Close FD on the target. Return 0, or -1 if an error occurs
3456 (and set *TARGET_ERRNO). */
3457 int
3458 target_fileio_close (int fd, int *target_errno)
3459 {
3460 struct target_ops *t;
3461
3462 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3463 {
3464 if (t->to_fileio_close != NULL)
3465 {
3466 int ret = t->to_fileio_close (t, fd, target_errno);
3467
3468 if (targetdebug)
3469 fprintf_unfiltered (gdb_stdlog,
3470 "target_fileio_close (%d) = %d (%d)\n",
3471 fd, ret, ret != -1 ? 0 : *target_errno);
3472 return ret;
3473 }
3474 }
3475
3476 *target_errno = FILEIO_ENOSYS;
3477 return -1;
3478 }
3479
3480 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3481 occurs (and set *TARGET_ERRNO). */
3482 int
3483 target_fileio_unlink (const char *filename, int *target_errno)
3484 {
3485 struct target_ops *t;
3486
3487 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3488 {
3489 if (t->to_fileio_unlink != NULL)
3490 {
3491 int ret = t->to_fileio_unlink (t, filename, target_errno);
3492
3493 if (targetdebug)
3494 fprintf_unfiltered (gdb_stdlog,
3495 "target_fileio_unlink (%s) = %d (%d)\n",
3496 filename, ret, ret != -1 ? 0 : *target_errno);
3497 return ret;
3498 }
3499 }
3500
3501 *target_errno = FILEIO_ENOSYS;
3502 return -1;
3503 }
3504
3505 /* Read value of symbolic link FILENAME on the target. Return a
3506 null-terminated string allocated via xmalloc, or NULL if an error
3507 occurs (and set *TARGET_ERRNO). */
3508 char *
3509 target_fileio_readlink (const char *filename, int *target_errno)
3510 {
3511 struct target_ops *t;
3512
3513 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3514 {
3515 if (t->to_fileio_readlink != NULL)
3516 {
3517 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3518
3519 if (targetdebug)
3520 fprintf_unfiltered (gdb_stdlog,
3521 "target_fileio_readlink (%s) = %s (%d)\n",
3522 filename, ret? ret : "(nil)",
3523 ret? 0 : *target_errno);
3524 return ret;
3525 }
3526 }
3527
3528 *target_errno = FILEIO_ENOSYS;
3529 return NULL;
3530 }
3531
static void
target_fileio_close_cleanup (void *opaque)
{
  /* Cleanup callback: OPAQUE points at the target descriptor to
     close.  Any close error is ignored.  */
  int ignored_errno;

  target_fileio_close (*(int *) opaque, &ignored_errno);
}
3540
3541 /* Read target file FILENAME. Store the result in *BUF_P and
3542 return the size of the transferred data. PADDING additional bytes are
3543 available in *BUF_P. This is a helper function for
3544 target_fileio_read_alloc; see the declaration of that function for more
3545 information. */
3546
static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  /* TARGET_ERRNO is discarded on failure; callers only see -1.  */
  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure the descriptor is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the tail of the buffer, always keeping PADDING
	 spare bytes at the end for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Note that for an empty file the
	     buffer is freed and *BUF_P is left unset; the return
	     value is then 0.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long transfer.  */
      QUIT;
    }
}
3604
3605 /* Read target file FILENAME. Store the result in *BUF_P and return
3606 the size of the transferred data. See the declaration in "target.h"
3607 function for more information about the return value. */
3608
LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* A plain binary read needs no extra padding bytes.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3614
3615 /* Read target file FILENAME. The result is NUL-terminated and
3616 returned as a string, allocated using xmalloc. If an error occurs
3617 or the transfer is unsupported, NULL is returned. Empty objects
3618 are returned as allocated but empty strings. A warning is issued
3619 if the result contains any embedded NUL bytes. */
3620
char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the NUL terminator added below
     always fits.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* For an empty file BUFFER was never set (see
     target_fileio_read_alloc_1); return a fresh empty string.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs. */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3651
3652
3653 static int
3654 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3655 CORE_ADDR addr, int len)
3656 {
3657 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3658 }
3659
3660 static int
3661 default_watchpoint_addr_within_range (struct target_ops *target,
3662 CORE_ADDR addr,
3663 CORE_ADDR start, int length)
3664 {
3665 return addr >= start && addr < start + length;
3666 }
3667
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  /* By default, every thread uses the main target architecture.  */
  return target_gdbarch ();
}
3673
/* Stub always returning 0; installed (via function-pointer casts)
   into target-vector slots that need a constant answer.  */
static int
return_zero (void)
{
  return 0;
}
3679
/* Stub always returning 1; installed (via function-pointer casts)
   into target-vector slots that need a constant answer.  */
static int
return_one (void)
{
  return 1;
}
3685
/* Stub always returning -1; installed (via function-pointer casts)
   into target-vector slots that need a constant answer.  */
static int
return_minus_one (void)
{
  return -1;
}
3691
/* Stub always returning a null pointer; installed (via
   function-pointer casts) into target-vector slots.  */
static void *
return_null (void)
{
  return 0;
}
3697
3698 /*
3699 * Find the next target down the stack from the specified target.
3700 */
3701
struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Simply the next stratum down on the target stack.  */
  return t->beneath;
}
3707
3708 /* See target.h. */
3709
3710 struct target_ops *
3711 find_target_at (enum strata stratum)
3712 {
3713 struct target_ops *t;
3714
3715 for (t = current_target.beneath; t != NULL; t = t->beneath)
3716 if (t->to_stratum == stratum)
3717 return t;
3718
3719 return NULL;
3720 }
3721
3722 \f
3723 /* The inferior process has died. Long live the inferior! */
3724
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear the global inferior ptid up front, keeping a copy so the
     inferior can still be looked up and exited below.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Cached register and frame data must not outlive the inferior.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Run the (deprecated) detach hook, if one is installed.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3757 \f
3758 /* Convert a normal process ID to a string. Returns the string in a
3759 static buffer. */
3760
3761 char *
3762 normal_pid_to_str (ptid_t ptid)
3763 {
3764 static char buf[32];
3765
3766 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3767 return buf;
3768 }
3769
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  /* The dummy target just uses the plain "process PID" form.  */
  return normal_pid_to_str (ptid);
}
3775
3776 /* Error-catcher for target_find_memory_regions. */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Pacify the compiler; presumably not reached after error.  */
  return 0;
}
3784
3785 /* Error-catcher for target_make_corefile_notes. */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Pacify the compiler; presumably not reached after error.  */
  return NULL;
}
3793
3794 /* Error-catcher for target_get_bookmark. */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  /* Pacify the compiler; tcomplain is declared noreturn.  */
  return NULL;
}
3801
3802 /* Error-catcher for target_goto_bookmark. */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  /* Bookmarks are not supported without a real target.  */
  tcomplain ();
}
3808
3809 /* Set up the handful of non-empty slots needed by the dummy target
3810 vector. */
3811
static void
init_dummy_target (void)
{
  /* Identification of the dummy target, which sits at the bottom of
     every target stack.  */
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "attach" and "run" look up the default run target.  */
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* These slots must report an error rather than crash when no real
     target is pushed.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The dummy target has no memory, stack, registers or execution.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3841 \f
3842 static void
3843 debug_to_open (char *args, int from_tty)
3844 {
3845 debug_target.to_open (args, from_tty);
3846
3847 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3848 }
3849
3850 void
3851 target_close (struct target_ops *targ)
3852 {
3853 gdb_assert (!target_is_pushed (targ));
3854
3855 if (targ->to_xclose != NULL)
3856 targ->to_xclose (targ);
3857 else if (targ->to_close != NULL)
3858 targ->to_close (targ);
3859
3860 if (targetdebug)
3861 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3862 }
3863
3864 void
3865 target_attach (char *args, int from_tty)
3866 {
3867 struct target_ops *t;
3868
3869 for (t = current_target.beneath; t != NULL; t = t->beneath)
3870 {
3871 if (t->to_attach != NULL)
3872 {
3873 t->to_attach (t, args, from_tty);
3874 if (targetdebug)
3875 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3876 args, from_tty);
3877 return;
3878 }
3879 }
3880
3881 internal_error (__FILE__, __LINE__,
3882 _("could not find a target to attach"));
3883 }
3884
3885 int
3886 target_thread_alive (ptid_t ptid)
3887 {
3888 struct target_ops *t;
3889
3890 for (t = current_target.beneath; t != NULL; t = t->beneath)
3891 {
3892 if (t->to_thread_alive != NULL)
3893 {
3894 int retval;
3895
3896 retval = t->to_thread_alive (t, ptid);
3897 if (targetdebug)
3898 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3899 ptid_get_pid (ptid), retval);
3900
3901 return retval;
3902 }
3903 }
3904
3905 return 0;
3906 }
3907
3908 void
3909 target_find_new_threads (void)
3910 {
3911 struct target_ops *t;
3912
3913 for (t = current_target.beneath; t != NULL; t = t->beneath)
3914 {
3915 if (t->to_find_new_threads != NULL)
3916 {
3917 t->to_find_new_threads (t);
3918 if (targetdebug)
3919 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3920
3921 return;
3922 }
3923 }
3924 }
3925
3926 void
3927 target_stop (ptid_t ptid)
3928 {
3929 if (!may_stop)
3930 {
3931 warning (_("May not interrupt or stop the target, ignoring attempt"));
3932 return;
3933 }
3934
3935 (*current_target.to_stop) (&current_target, ptid);
3936 }
3937
3938 static void
3939 debug_to_post_attach (struct target_ops *self, int pid)
3940 {
3941 debug_target.to_post_attach (&debug_target, pid);
3942
3943 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3944 }
3945
3946 /* Concatenate ELEM to LIST, a comma separate list, and return the
3947 result. The LIST incoming argument is released. */
3948
3949 static char *
3950 str_comma_list_concat_elem (char *list, const char *elem)
3951 {
3952 if (list == NULL)
3953 return xstrdup (elem);
3954 else
3955 return reconcat (list, list, ", ", elem, (char *) NULL);
3956 }
3957
3958 /* Helper for target_options_to_string. If OPT is present in
3959 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3960 Returns the new resulting string. OPT is removed from
3961 TARGET_OPTIONS. */
3962
static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      /* Append OPT's name and mark the bit as handled.  */
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3975
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append an option's own name and clear its bit if set.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  /* Callers always receive an allocated (possibly empty) string.  */
  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3993
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when REGNO denotes a raw register with
     a non-empty name; fall back to the raw number otherwise.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw register bytes in hex...  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* ...and, when the value fits in a LONGEST, also as an
	 address-style hex value plus its decimal form.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
4030
4031 void
4032 target_fetch_registers (struct regcache *regcache, int regno)
4033 {
4034 struct target_ops *t;
4035
4036 for (t = current_target.beneath; t != NULL; t = t->beneath)
4037 {
4038 if (t->to_fetch_registers != NULL)
4039 {
4040 t->to_fetch_registers (t, regcache, regno);
4041 if (targetdebug)
4042 debug_print_register ("target_fetch_registers", regcache, regno);
4043 return;
4044 }
4045 }
4046 }
4047
4048 void
4049 target_store_registers (struct regcache *regcache, int regno)
4050 {
4051 struct target_ops *t;
4052
4053 if (!may_write_registers)
4054 error (_("Writing to registers is not allowed (regno %d)"), regno);
4055
4056 current_target.to_store_registers (&current_target, regcache, regno);
4057 if (targetdebug)
4058 {
4059 debug_print_register ("target_store_registers", regcache, regno);
4060 }
4061 }
4062
4063 int
4064 target_core_of_thread (ptid_t ptid)
4065 {
4066 struct target_ops *t;
4067
4068 for (t = current_target.beneath; t != NULL; t = t->beneath)
4069 {
4070 if (t->to_core_of_thread != NULL)
4071 {
4072 int retval = t->to_core_of_thread (t, ptid);
4073
4074 if (targetdebug)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "target_core_of_thread (%d) = %d\n",
4077 ptid_get_pid (ptid), retval);
4078 return retval;
4079 }
4080 }
4081
4082 return -1;
4083 }
4084
4085 int
4086 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4087 {
4088 struct target_ops *t;
4089
4090 for (t = current_target.beneath; t != NULL; t = t->beneath)
4091 {
4092 if (t->to_verify_memory != NULL)
4093 {
4094 int retval = t->to_verify_memory (t, data, memaddr, size);
4095
4096 if (targetdebug)
4097 fprintf_unfiltered (gdb_stdlog,
4098 "target_verify_memory (%s, %s) = %d\n",
4099 paddress (target_gdbarch (), memaddr),
4100 pulongest (size),
4101 retval);
4102 return retval;
4103 }
4104 }
4105
4106 tcomplain ();
4107 }
4108
4109 /* The documentation for this function is in its prototype declaration in
4110 target.h. */
4111
4112 int
4113 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4114 {
4115 struct target_ops *t;
4116
4117 for (t = current_target.beneath; t != NULL; t = t->beneath)
4118 if (t->to_insert_mask_watchpoint != NULL)
4119 {
4120 int ret;
4121
4122 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4123
4124 if (targetdebug)
4125 fprintf_unfiltered (gdb_stdlog, "\
4126 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4127 core_addr_to_string (addr),
4128 core_addr_to_string (mask), rw, ret);
4129
4130 return ret;
4131 }
4132
4133 return 1;
4134 }
4135
4136 /* The documentation for this function is in its prototype declaration in
4137 target.h. */
4138
4139 int
4140 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4141 {
4142 struct target_ops *t;
4143
4144 for (t = current_target.beneath; t != NULL; t = t->beneath)
4145 if (t->to_remove_mask_watchpoint != NULL)
4146 {
4147 int ret;
4148
4149 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4150
4151 if (targetdebug)
4152 fprintf_unfiltered (gdb_stdlog, "\
4153 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4154 core_addr_to_string (addr),
4155 core_addr_to_string (mask), rw, ret);
4156
4157 return ret;
4158 }
4159
4160 return 1;
4161 }
4162
4163 /* The documentation for this function is in its prototype declaration
4164 in target.h. */
4165
4166 int
4167 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4168 {
4169 struct target_ops *t;
4170
4171 for (t = current_target.beneath; t != NULL; t = t->beneath)
4172 if (t->to_masked_watch_num_registers != NULL)
4173 return t->to_masked_watch_num_registers (t, addr, mask);
4174
4175 return -1;
4176 }
4177
4178 /* The documentation for this function is in its prototype declaration
4179 in target.h. */
4180
4181 int
4182 target_ranged_break_num_registers (void)
4183 {
4184 struct target_ops *t;
4185
4186 for (t = current_target.beneath; t != NULL; t = t->beneath)
4187 if (t->to_ranged_break_num_registers != NULL)
4188 return t->to_ranged_break_num_registers (t);
4189
4190 return -1;
4191 }
4192
4193 /* See target.h. */
4194
4195 struct btrace_target_info *
4196 target_enable_btrace (ptid_t ptid)
4197 {
4198 struct target_ops *t;
4199
4200 for (t = current_target.beneath; t != NULL; t = t->beneath)
4201 if (t->to_enable_btrace != NULL)
4202 return t->to_enable_btrace (ptid);
4203
4204 tcomplain ();
4205 return NULL;
4206 }
4207
4208 /* See target.h. */
4209
4210 void
4211 target_disable_btrace (struct btrace_target_info *btinfo)
4212 {
4213 struct target_ops *t;
4214
4215 for (t = current_target.beneath; t != NULL; t = t->beneath)
4216 if (t->to_disable_btrace != NULL)
4217 {
4218 t->to_disable_btrace (btinfo);
4219 return;
4220 }
4221
4222 tcomplain ();
4223 }
4224
4225 /* See target.h. */
4226
4227 void
4228 target_teardown_btrace (struct btrace_target_info *btinfo)
4229 {
4230 struct target_ops *t;
4231
4232 for (t = current_target.beneath; t != NULL; t = t->beneath)
4233 if (t->to_teardown_btrace != NULL)
4234 {
4235 t->to_teardown_btrace (btinfo);
4236 return;
4237 }
4238
4239 tcomplain ();
4240 }
4241
4242 /* See target.h. */
4243
4244 enum btrace_error
4245 target_read_btrace (VEC (btrace_block_s) **btrace,
4246 struct btrace_target_info *btinfo,
4247 enum btrace_read_type type)
4248 {
4249 struct target_ops *t;
4250
4251 for (t = current_target.beneath; t != NULL; t = t->beneath)
4252 if (t->to_read_btrace != NULL)
4253 return t->to_read_btrace (btrace, btinfo, type);
4254
4255 tcomplain ();
4256 return BTRACE_ERR_NOT_SUPPORTED;
4257 }
4258
4259 /* See target.h. */
4260
4261 void
4262 target_stop_recording (void)
4263 {
4264 struct target_ops *t;
4265
4266 for (t = current_target.beneath; t != NULL; t = t->beneath)
4267 if (t->to_stop_recording != NULL)
4268 {
4269 t->to_stop_recording ();
4270 return;
4271 }
4272
4273 /* This is optional. */
4274 }
4275
4276 /* See target.h. */
4277
4278 void
4279 target_info_record (void)
4280 {
4281 struct target_ops *t;
4282
4283 for (t = current_target.beneath; t != NULL; t = t->beneath)
4284 if (t->to_info_record != NULL)
4285 {
4286 t->to_info_record ();
4287 return;
4288 }
4289
4290 tcomplain ();
4291 }
4292
4293 /* See target.h. */
4294
4295 void
4296 target_save_record (const char *filename)
4297 {
4298 struct target_ops *t;
4299
4300 for (t = current_target.beneath; t != NULL; t = t->beneath)
4301 if (t->to_save_record != NULL)
4302 {
4303 t->to_save_record (filename);
4304 return;
4305 }
4306
4307 tcomplain ();
4308 }
4309
4310 /* See target.h. */
4311
4312 int
4313 target_supports_delete_record (void)
4314 {
4315 struct target_ops *t;
4316
4317 for (t = current_target.beneath; t != NULL; t = t->beneath)
4318 if (t->to_delete_record != NULL)
4319 return 1;
4320
4321 return 0;
4322 }
4323
4324 /* See target.h. */
4325
4326 void
4327 target_delete_record (void)
4328 {
4329 struct target_ops *t;
4330
4331 for (t = current_target.beneath; t != NULL; t = t->beneath)
4332 if (t->to_delete_record != NULL)
4333 {
4334 t->to_delete_record ();
4335 return;
4336 }
4337
4338 tcomplain ();
4339 }
4340
4341 /* See target.h. */
4342
4343 int
4344 target_record_is_replaying (void)
4345 {
4346 struct target_ops *t;
4347
4348 for (t = current_target.beneath; t != NULL; t = t->beneath)
4349 if (t->to_record_is_replaying != NULL)
4350 return t->to_record_is_replaying ();
4351
4352 return 0;
4353 }
4354
4355 /* See target.h. */
4356
4357 void
4358 target_goto_record_begin (void)
4359 {
4360 struct target_ops *t;
4361
4362 for (t = current_target.beneath; t != NULL; t = t->beneath)
4363 if (t->to_goto_record_begin != NULL)
4364 {
4365 t->to_goto_record_begin ();
4366 return;
4367 }
4368
4369 tcomplain ();
4370 }
4371
4372 /* See target.h. */
4373
4374 void
4375 target_goto_record_end (void)
4376 {
4377 struct target_ops *t;
4378
4379 for (t = current_target.beneath; t != NULL; t = t->beneath)
4380 if (t->to_goto_record_end != NULL)
4381 {
4382 t->to_goto_record_end ();
4383 return;
4384 }
4385
4386 tcomplain ();
4387 }
4388
4389 /* See target.h. */
4390
4391 void
4392 target_goto_record (ULONGEST insn)
4393 {
4394 struct target_ops *t;
4395
4396 for (t = current_target.beneath; t != NULL; t = t->beneath)
4397 if (t->to_goto_record != NULL)
4398 {
4399 t->to_goto_record (insn);
4400 return;
4401 }
4402
4403 tcomplain ();
4404 }
4405
4406 /* See target.h. */
4407
4408 void
4409 target_insn_history (int size, int flags)
4410 {
4411 struct target_ops *t;
4412
4413 for (t = current_target.beneath; t != NULL; t = t->beneath)
4414 if (t->to_insn_history != NULL)
4415 {
4416 t->to_insn_history (size, flags);
4417 return;
4418 }
4419
4420 tcomplain ();
4421 }
4422
4423 /* See target.h. */
4424
4425 void
4426 target_insn_history_from (ULONGEST from, int size, int flags)
4427 {
4428 struct target_ops *t;
4429
4430 for (t = current_target.beneath; t != NULL; t = t->beneath)
4431 if (t->to_insn_history_from != NULL)
4432 {
4433 t->to_insn_history_from (from, size, flags);
4434 return;
4435 }
4436
4437 tcomplain ();
4438 }
4439
4440 /* See target.h. */
4441
4442 void
4443 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4444 {
4445 struct target_ops *t;
4446
4447 for (t = current_target.beneath; t != NULL; t = t->beneath)
4448 if (t->to_insn_history_range != NULL)
4449 {
4450 t->to_insn_history_range (begin, end, flags);
4451 return;
4452 }
4453
4454 tcomplain ();
4455 }
4456
4457 /* See target.h. */
4458
4459 void
4460 target_call_history (int size, int flags)
4461 {
4462 struct target_ops *t;
4463
4464 for (t = current_target.beneath; t != NULL; t = t->beneath)
4465 if (t->to_call_history != NULL)
4466 {
4467 t->to_call_history (size, flags);
4468 return;
4469 }
4470
4471 tcomplain ();
4472 }
4473
4474 /* See target.h. */
4475
4476 void
4477 target_call_history_from (ULONGEST begin, int size, int flags)
4478 {
4479 struct target_ops *t;
4480
4481 for (t = current_target.beneath; t != NULL; t = t->beneath)
4482 if (t->to_call_history_from != NULL)
4483 {
4484 t->to_call_history_from (begin, size, flags);
4485 return;
4486 }
4487
4488 tcomplain ();
4489 }
4490
4491 /* See target.h. */
4492
4493 void
4494 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4495 {
4496 struct target_ops *t;
4497
4498 for (t = current_target.beneath; t != NULL; t = t->beneath)
4499 if (t->to_call_history_range != NULL)
4500 {
4501 t->to_call_history_range (begin, end, flags);
4502 return;
4503 }
4504
4505 tcomplain ();
4506 }
4507
/* Debug wrapper: delegate to the saved target vector's
   to_prepare_to_store, then log the call to gdb_stdlog.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4515
4516 /* See target.h. */
4517
4518 const struct frame_unwind *
4519 target_get_unwinder (void)
4520 {
4521 struct target_ops *t;
4522
4523 for (t = current_target.beneath; t != NULL; t = t->beneath)
4524 if (t->to_get_unwinder != NULL)
4525 return t->to_get_unwinder;
4526
4527 return NULL;
4528 }
4529
4530 /* See target.h. */
4531
4532 const struct frame_unwind *
4533 target_get_tailcall_unwinder (void)
4534 {
4535 struct target_ops *t;
4536
4537 for (t = current_target.beneath; t != NULL; t = t->beneath)
4538 if (t->to_get_tailcall_unwinder != NULL)
4539 return t->to_get_tailcall_unwinder;
4540
4541 return NULL;
4542 }
4543
4544 /* See target.h. */
4545
4546 CORE_ADDR
4547 forward_target_decr_pc_after_break (struct target_ops *ops,
4548 struct gdbarch *gdbarch)
4549 {
4550 for (; ops != NULL; ops = ops->beneath)
4551 if (ops->to_decr_pc_after_break != NULL)
4552 return ops->to_decr_pc_after_break (ops, gdbarch);
4553
4554 return gdbarch_decr_pc_after_break (gdbarch);
4555 }
4556
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Start the search just below the (possibly debug-wrapped) top of
     the target stack.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4564
/* Debug wrapper around the deprecated memory-transfer method:
   delegate to the saved target vector, then log the request and, when
   bytes were transferred, a hex dump of the buffer to gdb_stdlog.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new dump line whenever the host address of the
	     current byte is 16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, only dump the first line and elide
	         the rest; level 2 and up dumps everything.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4605
/* Debug wrapper: delegate to the saved target vector's to_files_info,
   then log the call to gdb_stdlog.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4613
4614 static int
4615 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4616 struct bp_target_info *bp_tgt)
4617 {
4618 int retval;
4619
4620 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4621
4622 fprintf_unfiltered (gdb_stdlog,
4623 "target_insert_breakpoint (%s, xxx) = %ld\n",
4624 core_addr_to_string (bp_tgt->placed_address),
4625 (unsigned long) retval);
4626 return retval;
4627 }
4628
4629 static int
4630 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4631 struct bp_target_info *bp_tgt)
4632 {
4633 int retval;
4634
4635 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4636
4637 fprintf_unfiltered (gdb_stdlog,
4638 "target_remove_breakpoint (%s, xxx) = %ld\n",
4639 core_addr_to_string (bp_tgt->placed_address),
4640 (unsigned long) retval);
4641 return retval;
4642 }
4643
4644 static int
4645 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4646 int type, int cnt, int from_tty)
4647 {
4648 int retval;
4649
4650 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4651 type, cnt, from_tty);
4652
4653 fprintf_unfiltered (gdb_stdlog,
4654 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4655 (unsigned long) type,
4656 (unsigned long) cnt,
4657 (unsigned long) from_tty,
4658 (unsigned long) retval);
4659 return retval;
4660 }
4661
4662 static int
4663 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4664 CORE_ADDR addr, int len)
4665 {
4666 CORE_ADDR retval;
4667
4668 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4669 addr, len);
4670
4671 fprintf_unfiltered (gdb_stdlog,
4672 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4673 core_addr_to_string (addr), (unsigned long) len,
4674 core_addr_to_string (retval));
4675 return retval;
4676 }
4677
4678 static int
4679 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4680 CORE_ADDR addr, int len, int rw,
4681 struct expression *cond)
4682 {
4683 int retval;
4684
4685 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4686 addr, len,
4687 rw, cond);
4688
4689 fprintf_unfiltered (gdb_stdlog,
4690 "target_can_accel_watchpoint_condition "
4691 "(%s, %d, %d, %s) = %ld\n",
4692 core_addr_to_string (addr), len, rw,
4693 host_address_to_string (cond), (unsigned long) retval);
4694 return retval;
4695 }
4696
4697 static int
4698 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4699 {
4700 int retval;
4701
4702 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4703
4704 fprintf_unfiltered (gdb_stdlog,
4705 "target_stopped_by_watchpoint () = %ld\n",
4706 (unsigned long) retval);
4707 return retval;
4708 }
4709
/* Debug wrapper: delegate to_stopped_data_address, then log the call.
   NOTE(review): unlike most wrappers here, this one passes TARGET
   through instead of &debug_target — confirm that is intentional.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  /* NOTE(review): *addr is printed even when retval is 0; presumably
     the method always stores something through ADDR — confirm.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4723
4724 static int
4725 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4726 CORE_ADDR addr,
4727 CORE_ADDR start, int length)
4728 {
4729 int retval;
4730
4731 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4732 start, length);
4733
4734 fprintf_filtered (gdb_stdlog,
4735 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4736 core_addr_to_string (addr), core_addr_to_string (start),
4737 length, retval);
4738 return retval;
4739 }
4740
4741 static int
4742 debug_to_insert_hw_breakpoint (struct target_ops *self,
4743 struct gdbarch *gdbarch,
4744 struct bp_target_info *bp_tgt)
4745 {
4746 int retval;
4747
4748 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4749 gdbarch, bp_tgt);
4750
4751 fprintf_unfiltered (gdb_stdlog,
4752 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4753 core_addr_to_string (bp_tgt->placed_address),
4754 (unsigned long) retval);
4755 return retval;
4756 }
4757
4758 static int
4759 debug_to_remove_hw_breakpoint (struct target_ops *self,
4760 struct gdbarch *gdbarch,
4761 struct bp_target_info *bp_tgt)
4762 {
4763 int retval;
4764
4765 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4766 gdbarch, bp_tgt);
4767
4768 fprintf_unfiltered (gdb_stdlog,
4769 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4770 core_addr_to_string (bp_tgt->placed_address),
4771 (unsigned long) retval);
4772 return retval;
4773 }
4774
4775 static int
4776 debug_to_insert_watchpoint (struct target_ops *self,
4777 CORE_ADDR addr, int len, int type,
4778 struct expression *cond)
4779 {
4780 int retval;
4781
4782 retval = debug_target.to_insert_watchpoint (&debug_target,
4783 addr, len, type, cond);
4784
4785 fprintf_unfiltered (gdb_stdlog,
4786 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4787 core_addr_to_string (addr), len, type,
4788 host_address_to_string (cond), (unsigned long) retval);
4789 return retval;
4790 }
4791
4792 static int
4793 debug_to_remove_watchpoint (struct target_ops *self,
4794 CORE_ADDR addr, int len, int type,
4795 struct expression *cond)
4796 {
4797 int retval;
4798
4799 retval = debug_target.to_remove_watchpoint (&debug_target,
4800 addr, len, type, cond);
4801
4802 fprintf_unfiltered (gdb_stdlog,
4803 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4804 core_addr_to_string (addr), len, type,
4805 host_address_to_string (cond), (unsigned long) retval);
4806 return retval;
4807 }
4808
/* Debug wrapper: delegate terminal initialization to the saved target
   vector, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4816
/* Debug wrapper: delegate to_terminal_inferior to the saved target
   vector, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4824
/* Debug wrapper: delegate to_terminal_ours_for_output to the saved
   target vector, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4832
/* Debug wrapper: delegate to_terminal_ours to the saved target
   vector, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4840
/* Debug wrapper: delegate to_terminal_save_ours to the saved target
   vector, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4848
/* Debug wrapper: delegate to_terminal_info to the saved target
   vector, then log the call and its arguments to gdb_stdlog.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4858
/* Debug wrapper: delegate to_load to the saved target vector, then
   log the call and its arguments to gdb_stdlog.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4866
/* Debug wrapper: delegate to_post_startup_inferior to the saved
   target vector, then log the inferior's pid to gdb_stdlog.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4875
4876 static int
4877 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4878 {
4879 int retval;
4880
4881 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4882
4883 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4884 pid, retval);
4885
4886 return retval;
4887 }
4888
4889 static int
4890 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4891 {
4892 int retval;
4893
4894 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4895
4896 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4897 pid, retval);
4898
4899 return retval;
4900 }
4901
4902 static int
4903 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4904 {
4905 int retval;
4906
4907 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4908
4909 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4910 pid, retval);
4911
4912 return retval;
4913 }
4914
4915 static int
4916 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4917 {
4918 int retval;
4919
4920 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4921
4922 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4923 pid, retval);
4924
4925 return retval;
4926 }
4927
4928 static int
4929 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4930 {
4931 int retval;
4932
4933 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4934
4935 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4936 pid, retval);
4937
4938 return retval;
4939 }
4940
4941 static int
4942 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4943 {
4944 int retval;
4945
4946 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4947
4948 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4949 pid, retval);
4950
4951 return retval;
4952 }
4953
4954 static int
4955 debug_to_has_exited (struct target_ops *self,
4956 int pid, int wait_status, int *exit_status)
4957 {
4958 int has_exited;
4959
4960 has_exited = debug_target.to_has_exited (&debug_target,
4961 pid, wait_status, exit_status);
4962
4963 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4964 pid, wait_status, *exit_status, has_exited);
4965
4966 return has_exited;
4967 }
4968
4969 static int
4970 debug_to_can_run (struct target_ops *self)
4971 {
4972 int retval;
4973
4974 retval = debug_target.to_can_run (&debug_target);
4975
4976 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4977
4978 return retval;
4979 }
4980
4981 static struct gdbarch *
4982 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4983 {
4984 struct gdbarch *retval;
4985
4986 retval = debug_target.to_thread_architecture (ops, ptid);
4987
4988 fprintf_unfiltered (gdb_stdlog,
4989 "target_thread_architecture (%s) = %s [%s]\n",
4990 target_pid_to_str (ptid),
4991 host_address_to_string (retval),
4992 gdbarch_bfd_arch_info (retval)->printable_name);
4993 return retval;
4994 }
4995
/* Debug wrapper: delegate to_stop to the saved target vector, then
   log the ptid to gdb_stdlog.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
5004
/* Debug wrapper: delegate to_rcmd to the saved target vector, then
   log the command string to gdb_stdlog.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
5012
5013 static char *
5014 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
5015 {
5016 char *exec_file;
5017
5018 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
5019
5020 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
5021 pid, exec_file);
5022
5023 return exec_file;
5024 }
5025
/* Install the debug wrappers: save a copy of the current (fully
   merged) target vector in debug_target, then redirect the methods of
   current_target to the debug_to_* logging wrappers above, which
   delegate through that saved copy.  Called when "set debug target"
   is enabled.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5073 \f
5074
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5079
/* Implement the "monitor" command: pass CMD straight through to the
   target via target_rcmd.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  /* The command is only meaningful if the target (or, when target
     debugging is active, the real target beneath the debug layer)
     provides a to_rcmd implementation.  The default implementation is
     tcomplain, so detect "unsupported" by comparing against tcomplain
     cast to the to_rcmd function-pointer type.  */
  if ((current_target.to_rcmd
       == (void (*) (struct target_ops *, char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (struct target_ops *,
			    char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
5093
5094 /* Print the name of each layers of our target stack. */
5095
5096 static void
5097 maintenance_print_target_stack (char *cmd, int from_tty)
5098 {
5099 struct target_ops *t;
5100
5101 printf_filtered (_("The current target stack is:\n"));
5102
5103 for (t = target_stack; t != NULL; t = t->beneath)
5104 {
5105 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5106 }
5107 }
5108
/* Controls if async mode is permitted.  The value actually in
   effect; see also target_async_permitted_1 below.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (the change is
   rolled back by set_target_async_command).  */
static int target_async_permitted_1 = 0;
5115
/* The "set target-async" callback: copy the user-set value into
   target_async_permitted, refusing the change while the inferior is
   running.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Roll the user-visible setting back to the value actually in
	 effect before reporting the error.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5128
/* The "show target-async" callback: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5138
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these; the real may_* variables are only updated when the
   change is allowed (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5147
/* Make the user-set values match the real values again.  Used to roll
   back the temporaries above after a rejected change.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5160
/* The one function handles (most of) the permission flags in the same
   way: reject the change while the inferior is executing, otherwise
   copy the user-set temporaries into the real variables.  Note that
   may_write_memory is intentionally not handled here; it has its own
   callback (set_write_memory_permission) below.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the user-visible settings back before erroring out.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5182
/* Set memory write permission independently of observer mode.  Note:
   unlike set_target_permissions, no check for a running inferior is
   performed here.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5193
5194
/* Module initialization: install the dummy target at the bottom of
   the target stack and register all target-related commands and
   set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target is always present at the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are synonyms.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission settings below all share
     set_target_permissions, except "may-write-memory" which may be
     changed even under observer mode via its own callback.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.132998 seconds and 3 git commands to generate.