Add target_ops argument to to_trace_stop
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void tcomplain (void) ATTRIBUTE_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (struct target_ops *self);
136
137 static void debug_to_terminal_inferior (struct target_ops *self);
138
139 static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141 static void debug_to_terminal_save_ours (struct target_ops *self);
142
143 static void debug_to_terminal_ours (struct target_ops *self);
144
145 static void debug_to_load (struct target_ops *self, char *, int);
146
147 static int debug_to_can_run (struct target_ops *self);
148
149 static void debug_to_stop (struct target_ops *self, ptid_t);
150
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183 memory breakpoint inserted by gdb. */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertant disruption, such as in non-stop mode. */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Non-zero if we want to see trace of target level stuff. */
204
205 static unsigned int targetdebug = 0;
/* Implement "show debug target": report the current value of the
   targetdebug setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
212
213 static void setup_target_debug (void);
214
/* The user just typed 'target' without the name of a target.
   Print a usage hint; ARG and FROM_TTY are unused.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
/* Like target_has_execution_1, but for the currently selected
   inferior (inferior_ptid).  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
343
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no".  return_zero is cast to
     each exact callback type rather than defining one trivial stub
     per signature.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill the remaining still-NULL methods with delegators; done last
     so the explicit defaults above take precedence.  */
  install_delegators (t);
}
371
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Lazily create the registry, then grow it by doubling when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
412
/* Add a possible target architecture to the list, with no command
   completer.  Convenience wrapper around add_target_with_completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
420
/* See target.h.  Register ALIAS as a deprecated alias for target T's
   "target <shortname>" command.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* ALT is not freed here; presumably deprecate_cmd retains the
     pointer for the lifetime of the command — verify against the
     cli machinery before changing this.  */
  deprecate_cmd (c, alt);
}
435
/* Stub functions */

/* Do-nothing stub, used as a default for void target methods that may
   legitimately be no-ops.  */

void
target_ignore (void)
{
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
/* Download a program (the "load" command).  The data cache is
   invalidated first because the download changes target memory
   behind the cache's back.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
467
468 void
469 target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471 {
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489 }
490
/* Hand the terminal over to the inferior when GDB is about to resume
   it in the foreground; a no-op for background resumes.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
505
/* Default memory-transfer stub for targets with no accessible memory:
   fail every request by returning 0 bytes with errno set to EIO.
   MEMADDR, MYADDR, LEN and WRITE are ignored.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
513
/* Default stub for operations the current target does not support:
   throw an error naming the target.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
/* Throw an error for operations that require a live process.  Does
   not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
526
/* Default to_terminal_info implementation: report that no terminal
   state has been saved.  ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops *self)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy a field from T only if current_target has not already
     received one from a target higher on the stack; since the stack
     is walked top-down, the topmost implementation wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  Each
     default is cast to the exact method signature.  */

#define de_fault(field, value) \
  if (!current_target.field)	       \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct target_ops *, struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (struct target_ops *, int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (struct target_ops *, char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_rcmd,
	    (void (*) (struct target_ops *, char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is explicitly reset (not defaulted), so
     callers can detect that no target provides one.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
960
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR walks the
     chain of "beneath" links by address so splicing works in place.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Refresh the squashed current_target to reflect the new stack.  */
  update_current_target ();
}
1011
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1053
/* Unpush (and close) every target whose stratum is strictly above
   ABOVE_STRATUM.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* target_stack is the top of the stack; unpush_target only
	 fails if the target is not found there, which would mean
	 current_target and the stack are out of sync.  */
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1070
/* Unpush (and close) every target except the ever-present dummy
   target at dummy_stratum.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1076
1077 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1078
1079 int
1080 target_is_pushed (struct target_ops *t)
1081 {
1082 struct target_ops **cur;
1083
1084 /* Check magic number. If wrong, it probably means someone changed
1085 the struct definition, but not all the places that initialize one. */
1086 if (t->to_magic != OPS_MAGIC)
1087 {
1088 fprintf_unfiltered (gdb_stderr,
1089 "Magic number of %s target struct wrong\n",
1090 t->to_shortname);
1091 internal_error (__FILE__, __LINE__,
1092 _("failed internal consistency check"));
1093 }
1094
1095 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1096 if (*cur == t)
1097 return 1;
1098
1099 return 0;
1100 }
1101
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws a
   user error if TLS cannot be resolved on this target.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile so the value assigned inside TRY_CATCH is reliable after
     a possible longjmp out of the guarded block.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first layer of the stack that knows how to resolve TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher
		 catcher to handle.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1198
1199 const char *
1200 target_xfer_status_to_string (enum target_xfer_status err)
1201 {
1202 #define CASE(X) case X: return #X
1203 switch (err)
1204 {
1205 CASE(TARGET_XFER_E_IO);
1206 CASE(TARGET_XFER_E_UNAVAILABLE);
1207 default:
1208 return "<unknown>";
1209 }
1210 #undef CASE
1211 };
1212
1213
1214 #undef MIN
1215 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1216
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte chunk containing MEMADDR; TLEN is the
	 number of useful bytes in it, OFFSET where they start.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at the terminating NUL
	 (which is included in the returned count).  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* The buffer is returned even on error, so the caller can use any
     partial data; the caller always owns and frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1289
1290 struct target_section_table *
1291 target_get_section_table (struct target_ops *target)
1292 {
1293 struct target_ops *t;
1294
1295 if (targetdebug)
1296 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1297
1298 for (t = target; t != NULL; t = t->beneath)
1299 if (t->to_get_section_table != NULL)
1300 return (*t->to_get_section_table) (t);
1301
1302 return NULL;
1303 }
1304
1305 /* Find a section containing ADDR. */
1306
1307 struct target_section *
1308 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1309 {
1310 struct target_section_table *table = target_get_section_table (target);
1311 struct target_section *secp;
1312
1313 if (table == NULL)
1314 return NULL;
1315
1316 for (secp = table->sections; secp < table->sections_end; secp++)
1317 {
1318 if (addr >= secp->addr && addr < secp->endaddr)
1319 return secp;
1320 }
1321 return NULL;
1322 }
1323
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  /* Traceframe -1 means "no traceframe selected", i.e. live memory.  */
  set_traceframe_number (-1);

  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1348
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a read-only section; anything
     else is not safe to fetch from the live target while inspecting a
     traceframe.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Nothing readable found at MEMADDR.  */
  return TARGET_XFER_EOF;
}
1404
1405 /* Read memory from more than one valid target. A core file, for
1406 instance, could have some of memory but delegate other bits to
1407 the target below it. So, we must manually try all targets. */
1408
1409 static enum target_xfer_status
1410 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1411 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1412 ULONGEST *xfered_len)
1413 {
1414 enum target_xfer_status res;
1415
1416 do
1417 {
1418 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1419 readbuf, writebuf, memaddr, len,
1420 xfered_len);
1421 if (res == TARGET_XFER_OK)
1422 break;
1423
1424 /* Stop if the target reports that the memory is not available. */
1425 if (res == TARGET_XFER_E_UNAVAILABLE)
1426 break;
1427
1428 /* We want to continue past core files to executables, but not
1429 past a running target's memory. */
1430 if (ops->to_has_all_memory (ops))
1431 break;
1432
1433 ops = ops->beneath;
1434 }
1435 while (ops != NULL);
1436
1437 return res;
1438 }
1439
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped-overlay file reads, trusted read-only
   executable sections, traceframe-available memory, the dcache, and
   finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The available ranges are sorted; if the first one doesn't
	     start exactly at MEMADDR, the bytes at MEMADDR are not in
	     the traceframe.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1650
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  This layer handles software-breakpoint shadowing
   around the actual transfer done by memory_xfer_partial_1.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
				   xfered_len);

      /* Replace breakpoint insns in the read data with the original
	 shadowed bytes, unless the user asked to see them.  */
      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
    }
  else
    {
      void *buf;
      struct cleanup *old_chain;

      /* A large write request is likely to be partially satisfied
	 by memory_xfer_partial_1.  We will continually malloc
	 and free a copy of the entire write request for breakpoint
	 shadow handling even though we only end up writing a small
	 subset of it.  Cap writes to 4KB to mitigate this.  */
      len = min (4096, len);

      /* Work on a copy so breakpoint insns can be spliced into the
	 outgoing data without modifying the caller's buffer.  */
      buf = xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
				   xfered_len);

      do_cleanups (old_chain);
    }

  return res;
}
1701
/* Cleanup callback: restore show_memory_breakpoints to the saved
   value smuggled through ARG as a uintptr_t.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1707
/* Set show_memory_breakpoints to SHOW, returning a cleanup that
   restores the previous value when run.  */

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
		       (void *) (uintptr_t) current);
}
1717
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes, 16 per line; truncate
	     after the first line unless "set debug target" >= 2.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1810
1811 /* Read LEN bytes of target memory at address MEMADDR, placing the
1812 results in GDB's memory at MYADDR. Returns either 0 for success or
1813 TARGET_XFER_E_IO if any error occurs.
1814
1815 If an error occurs, no guarantee is made about the contents of the data at
1816 MYADDR. In particular, the caller should not depend upon partial reads
1817 filling the buffer with good data. There is no way for the caller to know
1818 how much good data might have been transfered anyway. Callers that can
1819 deal with partial reads should call target_read (which will retry until
1820 it makes no progress, and then return how much was transferred). */
1821
1822 int
1823 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1824 {
1825 /* Dispatch to the topmost target, not the flattened current_target.
1826 Memory accesses check target->to_has_(all_)memory, and the
1827 flattened target doesn't inherit those. */
1828 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1829 myaddr, memaddr, len) == len)
1830 return 0;
1831 else
1832 return TARGET_XFER_E_IO;
1833 }
1834
1835 /* Like target_read_memory, but specify explicitly that this is a read
1836 from the target's raw memory. That is, this read bypasses the
1837 dcache, breakpoint shadowing, etc. */
1838
1839 int
1840 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1841 {
1842 /* See comment in target_read_memory about why the request starts at
1843 current_target.beneath. */
1844 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1845 myaddr, memaddr, len) == len)
1846 return 0;
1847 else
1848 return TARGET_XFER_E_IO;
1849 }
1850
1851 /* Like target_read_memory, but specify explicitly that this is a read from
1852 the target's stack. This may trigger different cache behavior. */
1853
1854 int
1855 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1856 {
1857 /* See comment in target_read_memory about why the request starts at
1858 current_target.beneath. */
1859 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1860 myaddr, memaddr, len) == len)
1861 return 0;
1862 else
1863 return TARGET_XFER_E_IO;
1864 }
1865
1866 /* Like target_read_memory, but specify explicitly that this is a read from
1867 the target's code. This may trigger different cache behavior. */
1868
1869 int
1870 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1871 {
1872 /* See comment in target_read_memory about why the request starts at
1873 current_target.beneath. */
1874 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1875 myaddr, memaddr, len) == len)
1876 return 0;
1877 else
1878 return TARGET_XFER_E_IO;
1879 }
1880
1881 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1882 Returns either 0 for success or TARGET_XFER_E_IO if any
1883 error occurs. If an error occurs, no guarantee is made about how
1884 much data got written. Callers that can deal with partial writes
1885 should call target_write. */
1886
1887 int
1888 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1889 {
1890 /* See comment in target_read_memory about why the request starts at
1891 current_target.beneath. */
1892 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1893 myaddr, memaddr, len) == len)
1894 return 0;
1895 else
1896 return TARGET_XFER_E_IO;
1897 }
1898
1899 /* Write LEN bytes from MYADDR to target raw memory at address
1900 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1901 if any error occurs. If an error occurs, no guarantee is made
1902 about how much data got written. Callers that can deal with
1903 partial writes should call target_write. */
1904
1905 int
1906 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1907 {
1908 /* See comment in target_read_memory about why the request starts at
1909 current_target.beneath. */
1910 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1911 myaddr, memaddr, len) == len)
1912 return 0;
1913 else
1914 return TARGET_XFER_E_IO;
1915 }
1916
/* Fetch the target's memory map.  Returns a sorted, numbered VEC of
   regions owned by the caller, or NULL if the target provides no map
   or the map is invalid (overlapping regions).  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Use the first layer of the stack implementing to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  /* The whole map is discarded rather than trying to repair
	     it; the target gave us inconsistent data.  */
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1964
1965 void
1966 target_flash_erase (ULONGEST address, LONGEST length)
1967 {
1968 struct target_ops *t;
1969
1970 for (t = current_target.beneath; t != NULL; t = t->beneath)
1971 if (t->to_flash_erase != NULL)
1972 {
1973 if (targetdebug)
1974 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1975 hex_string (address), phex (length, 0));
1976 t->to_flash_erase (t, address, length);
1977 return;
1978 }
1979
1980 tcomplain ();
1981 }
1982
1983 void
1984 target_flash_done (void)
1985 {
1986 struct target_ops *t;
1987
1988 for (t = current_target.beneath; t != NULL; t = t->beneath)
1989 if (t->to_flash_done != NULL)
1990 {
1991 if (targetdebug)
1992 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1993 t->to_flash_done (t);
1994 return;
1995 }
1996
1997 tcomplain ();
1998 }
1999
/* "show trust-readonly-sections" command callback; prints VALUE, the
   current setting, to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2008
2009 /* More generic transfers. */
2010
2011 static enum target_xfer_status
2012 default_xfer_partial (struct target_ops *ops, enum target_object object,
2013 const char *annex, gdb_byte *readbuf,
2014 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2015 ULONGEST *xfered_len)
2016 {
2017 if (object == TARGET_OBJECT_MEMORY
2018 && ops->deprecated_xfer_memory != NULL)
2019 /* If available, fall back to the target's
2020 "deprecated_xfer_memory" method. */
2021 {
2022 int xfered = -1;
2023
2024 errno = 0;
2025 if (writebuf != NULL)
2026 {
2027 void *buffer = xmalloc (len);
2028 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2029
2030 memcpy (buffer, writebuf, len);
2031 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2032 1/*write*/, NULL, ops);
2033 do_cleanups (cleanup);
2034 }
2035 if (readbuf != NULL)
2036 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2037 0/*read*/, NULL, ops);
2038 if (xfered > 0)
2039 {
2040 *xfered_len = (ULONGEST) xfered;
2041 return TARGET_XFER_E_IO;
2042 }
2043 else if (xfered == 0 && errno == 0)
2044 /* "deprecated_xfer_memory" uses 0, cross checked against
2045 ERRNO as one indication of an error. */
2046 return TARGET_XFER_EOF;
2047 else
2048 return TARGET_XFER_E_IO;
2049 }
2050 else
2051 {
2052 gdb_assert (ops->beneath != NULL);
2053 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2054 readbuf, writebuf, offset, len,
2055 xfered_len);
2056 }
2057 }
2058
/* Target vector read/write partial wrapper functions.  */

/* Read-only wrapper: forward to target_xfer_partial with a NULL write
   buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2071
/* Write-only wrapper: forward to target_xfer_partial with a NULL read
   buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
2081
2082 /* Wrappers to perform the full transfer. */
2083
2084 /* For docs on target_read see target.h. */
2085
2086 LONGEST
2087 target_read (struct target_ops *ops,
2088 enum target_object object,
2089 const char *annex, gdb_byte *buf,
2090 ULONGEST offset, LONGEST len)
2091 {
2092 LONGEST xfered = 0;
2093
2094 while (xfered < len)
2095 {
2096 ULONGEST xfered_len;
2097 enum target_xfer_status status;
2098
2099 status = target_read_partial (ops, object, annex,
2100 (gdb_byte *) buf + xfered,
2101 offset + xfered, len - xfered,
2102 &xfered_len);
2103
2104 /* Call an observer, notifying them of the xfer progress? */
2105 if (status == TARGET_XFER_EOF)
2106 return xfered;
2107 else if (status == TARGET_XFER_OK)
2108 {
2109 xfered += xfered_len;
2110 QUIT;
2111 }
2112 else
2113 return -1;
2114
2115 }
2116 return len;
2117 }
2118
2119 /* Assuming that the entire [begin, end) range of memory cannot be
2120 read, try to read whatever subrange is possible to read.
2121
2122 The function returns, in RESULT, either zero or one memory block.
2123 If there's a readable subrange at the beginning, it is completely
2124 read and returned. Any further readable subrange will not be read.
2125 Otherwise, if there's a readable subrange at the end, it will be
2126 completely read and returned. Any readable subranges before it
2127 (obviously, not starting at the beginning), will be ignored. In
2128 other cases -- either no readable subrange, or readable subrange(s)
2129 that is neither at the beginning, or end, nothing is returned.
2130
2131 The purpose of this function is to handle a read across a boundary
2132 of accessible memory in a case when memory map is not available.
2133 The above restrictions are fine for this case, but will give
2134 incorrect results if the memory is 'patchy'. However, supporting
2135 'patchy' memory would require trying to read every single byte,
2136 and it seems unacceptable solution. Explicit memory map is
2137 recommended for this case -- and target_read_memory_robust will
2138 take care of reading multiple ranges then. */
2139
/* Binary-search for the readable/unreadable boundary inside
   [BEGIN, END), which is known not to be readable as a whole, and push
   at most one fully-read block onto *RESULT.  See the comment above
   for the exact contract.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero: readable part starts at BEGIN.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither end is readable; report nothing.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the readable
	 end we anchored on; its data lands at the right offset in BUF.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF already
	 holds it starting at offset 0, so hand BUF itself over.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it to a
	 right-sized buffer since it sits at the tail of BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2253
2254 void
2255 free_memory_read_result_vector (void *x)
2256 {
2257 VEC(memory_read_result_s) *v = x;
2258 memory_read_result_s *current;
2259 int ix;
2260
2261 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2262 {
2263 xfree (current->data);
2264 }
2265 VEC_free (memory_read_result_s, v);
2266 }
2267
2268 VEC(memory_read_result_s) *
2269 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2270 {
2271 VEC(memory_read_result_s) *result = 0;
2272
2273 LONGEST xfered = 0;
2274 while (xfered < len)
2275 {
2276 struct mem_region *region = lookup_mem_region (offset + xfered);
2277 LONGEST rlen;
2278
2279 /* If there is no explicit region, a fake one should be created. */
2280 gdb_assert (region);
2281
2282 if (region->hi == 0)
2283 rlen = len - xfered;
2284 else
2285 rlen = region->hi - offset;
2286
2287 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2288 {
2289 /* Cannot read this region. Note that we can end up here only
2290 if the region is explicitly marked inaccessible, or
2291 'inaccessible-by-default' is in effect. */
2292 xfered += rlen;
2293 }
2294 else
2295 {
2296 LONGEST to_read = min (len - xfered, rlen);
2297 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2298
2299 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2300 (gdb_byte *) buffer,
2301 offset + xfered, to_read);
2302 /* Call an observer, notifying them of the xfer progress? */
2303 if (xfer <= 0)
2304 {
2305 /* Got an error reading full chunk. See if maybe we can read
2306 some subrange. */
2307 xfree (buffer);
2308 read_whatever_is_readable (ops, offset + xfered,
2309 offset + xfered + to_read, &result);
2310 xfered += to_read;
2311 }
2312 else
2313 {
2314 struct memory_read_result r;
2315 r.data = buffer;
2316 r.begin = offset + xfered;
2317 r.end = r.begin + xfer;
2318 VEC_safe_push (memory_read_result_s, result, &r);
2319 xfered += xfer;
2320 }
2321 QUIT;
2322 }
2323 }
2324 return result;
2325 }
2326
2327
2328 /* An alternative to target_write with progress callbacks. */
2329
2330 LONGEST
2331 target_write_with_progress (struct target_ops *ops,
2332 enum target_object object,
2333 const char *annex, const gdb_byte *buf,
2334 ULONGEST offset, LONGEST len,
2335 void (*progress) (ULONGEST, void *), void *baton)
2336 {
2337 LONGEST xfered = 0;
2338
2339 /* Give the progress callback a chance to set up. */
2340 if (progress)
2341 (*progress) (0, baton);
2342
2343 while (xfered < len)
2344 {
2345 ULONGEST xfered_len;
2346 enum target_xfer_status status;
2347
2348 status = target_write_partial (ops, object, annex,
2349 (gdb_byte *) buf + xfered,
2350 offset + xfered, len - xfered,
2351 &xfered_len);
2352
2353 if (status == TARGET_XFER_EOF)
2354 return xfered;
2355 if (TARGET_XFER_STATUS_ERROR_P (status))
2356 return -1;
2357
2358 gdb_assert (status == TARGET_XFER_OK);
2359 if (progress)
2360 (*progress) (xfered_len, baton);
2361
2362 xfered += xfered_len;
2363 QUIT;
2364 }
2365 return len;
2366 }
2367
2368 /* For docs on target_write see target.h. */
2369
2370 LONGEST
2371 target_write (struct target_ops *ops,
2372 enum target_object object,
2373 const char *annex, const gdb_byte *buf,
2374 ULONGEST offset, LONGEST len)
2375 {
2376 return target_write_with_progress (ops, object, annex, buf, offset, len,
2377 NULL, NULL);
2378 }
2379
2380 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2381 the size of the transferred data. PADDING additional bytes are
2382 available in *BUF_P. This is a helper function for
2383 target_read_alloc; see the declaration of that function for more
2384 information. */
2385
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;	/* Capacity and fill level of BUF.  */
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Always leave PADDING bytes of headroom at the end of BUF (the
	 caller may want to NUL-terminate, for instance).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Hand BUF to the caller only if it
	     holds data; otherwise free it (caller must not look at
	     *BUF_P when 0 is returned).  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2442
2443 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2444 the size of the transferred data. See the declaration in "target.h"
2445 function for more information about the return value. */
2446
2447 LONGEST
2448 target_read_alloc (struct target_ops *ops, enum target_object object,
2449 const char *annex, gdb_byte **buf_p)
2450 {
2451 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2452 }
2453
2454 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2455 returned as a string, allocated using xmalloc. If an error occurs
2456 or the transfer is unsupported, NULL is returned. Empty objects
2457 are returned as allocated but empty strings. A warning is issued
2458 if the result contains any embedded NUL bytes. */
2459
2460 char *
2461 target_read_stralloc (struct target_ops *ops, enum target_object object,
2462 const char *annex)
2463 {
2464 gdb_byte *buffer;
2465 char *bufstr;
2466 LONGEST i, transferred;
2467
2468 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2469 bufstr = (char *) buffer;
2470
2471 if (transferred < 0)
2472 return NULL;
2473
2474 if (transferred == 0)
2475 return xstrdup ("");
2476
2477 bufstr[transferred] = 0;
2478
2479 /* Check for embedded NUL bytes; but allow trailing NULs. */
2480 for (i = strlen (bufstr); i < transferred; i++)
2481 if (bufstr[i] != 0)
2482 {
2483 warning (_("target object %d, annex %s, "
2484 "contained unexpected null characters"),
2485 (int) object, annex ? annex : "(none)");
2486 break;
2487 }
2488
2489 return bufstr;
2490 }
2491
2492 /* Memory transfer methods. */
2493
2494 void
2495 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2496 LONGEST len)
2497 {
2498 /* This method is used to read from an alternate, non-current
2499 target. This read must bypass the overlay support (as symbols
2500 don't match this target), and GDB's internal cache (wrong cache
2501 for this target). */
2502 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2503 != len)
2504 memory_error (TARGET_XFER_E_IO, addr);
2505 }
2506
2507 ULONGEST
2508 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2509 int len, enum bfd_endian byte_order)
2510 {
2511 gdb_byte buf[sizeof (ULONGEST)];
2512
2513 gdb_assert (len <= sizeof (buf));
2514 get_target_memory (ops, addr, buf, len);
2515 return extract_unsigned_integer (buf, len, byte_order);
2516 }
2517
2518 /* See target.h. */
2519
2520 int
2521 target_insert_breakpoint (struct gdbarch *gdbarch,
2522 struct bp_target_info *bp_tgt)
2523 {
2524 if (!may_insert_breakpoints)
2525 {
2526 warning (_("May not insert breakpoints"));
2527 return 1;
2528 }
2529
2530 return current_target.to_insert_breakpoint (&current_target,
2531 gdbarch, bp_tgt);
2532 }
2533
2534 /* See target.h. */
2535
2536 int
2537 target_remove_breakpoint (struct gdbarch *gdbarch,
2538 struct bp_target_info *bp_tgt)
2539 {
2540 /* This is kind of a weird case to handle, but the permission might
2541 have been changed after breakpoints were inserted - in which case
2542 we should just take the user literally and assume that any
2543 breakpoints should be left in place. */
2544 if (!may_insert_breakpoints)
2545 {
2546 warning (_("May not remove breakpoints"));
2547 return 1;
2548 }
2549
2550 return current_target.to_remove_breakpoint (&current_target,
2551 gdbarch, bp_tgt);
2552 }
2553
2554 static void
2555 target_info (char *args, int from_tty)
2556 {
2557 struct target_ops *t;
2558 int has_all_mem = 0;
2559
2560 if (symfile_objfile != NULL)
2561 printf_unfiltered (_("Symbols from \"%s\".\n"),
2562 objfile_name (symfile_objfile));
2563
2564 for (t = target_stack; t != NULL; t = t->beneath)
2565 {
2566 if (!(*t->to_has_memory) (t))
2567 continue;
2568
2569 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2570 continue;
2571 if (has_all_mem)
2572 printf_unfiltered (_("\tWhile running this, "
2573 "GDB does not access memory from...\n"));
2574 printf_unfiltered ("%s:\n", t->to_longname);
2575 (t->to_files_info) (t);
2576 has_all_mem = (*t->to_has_all_memory) (t);
2577 }
2578 }
2579
2580 /* This function is called before any new inferior is created, e.g.
2581 by running a program, attaching, or connecting to a target.
2582 It cleans up any state from previous invocations which might
2583 change between runs. This is a subset of what target_preopen
2584 resets (things which might change between targets). */
2585
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state. Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target. This has been observed on GNU/Linux using glibc 2.3. How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors. If code is shared between processes, so are
     memory regions and features. */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      /* Per-inferior state: drop the shared-library list, the cached
	 memory-region map, and any fetched target description.  */
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* The in-process agent's capabilities must be re-probed for the new
     inferior regardless of solist globality.  */
  agent_capability_invalidate ();
}
2620
2621 /* Callback for iterate_over_inferiors. Gets rid of the given
2622 inferior. */
2623
2624 static int
2625 dispose_inferior (struct inferior *inf, void *args)
2626 {
2627 struct thread_info *thread;
2628
2629 thread = any_thread_of_process (inf->pid);
2630 if (thread)
2631 {
2632 switch_to_thread (thread->ptid);
2633
2634 /* Core inferiors actually should be detached, not killed. */
2635 if (target_has_execution)
2636 target_kill ();
2637 else
2638 target_detach (NULL, 0);
2639 }
2640
2641 return 0;
2642 }
2643
2644 /* This is to be called by the open routine before it does
2645 anything. */
2646
2647 void
2648 target_preopen (int from_tty)
2649 {
2650 dont_repeat ();
2651
2652 if (have_inferiors ())
2653 {
2654 if (!from_tty
2655 || !have_live_inferiors ()
2656 || query (_("A program is being debugged already. Kill it? ")))
2657 iterate_over_inferiors (dispose_inferior, NULL);
2658 else
2659 error (_("Program not killed."));
2660 }
2661
2662 /* Calling target_kill may remove the target from the stack. But if
2663 it doesn't (which seems like a win for UDI), remove it now. */
2664 /* Leave the exec target, though. The user may be switching from a
2665 live process to a core of the same program. */
2666 pop_all_targets_above (file_stratum);
2667
2668 target_pre_inferior (from_tty);
2669 }
2670
2671 /* Detach a target after doing deferred register stores. */
2672
2673 void
2674 target_detach (const char *args, int from_tty)
2675 {
2676 struct target_ops* t;
2677
2678 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2679 /* Don't remove global breakpoints here. They're removed on
2680 disconnection from the target. */
2681 ;
2682 else
2683 /* If we're in breakpoints-always-inserted mode, have to remove
2684 them before detaching. */
2685 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2686
2687 prepare_for_detach ();
2688
2689 for (t = current_target.beneath; t != NULL; t = t->beneath)
2690 {
2691 if (t->to_detach != NULL)
2692 {
2693 t->to_detach (t, args, from_tty);
2694 if (targetdebug)
2695 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2696 args, from_tty);
2697 return;
2698 }
2699 }
2700
2701 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2702 }
2703
2704 void
2705 target_disconnect (char *args, int from_tty)
2706 {
2707 struct target_ops *t;
2708
2709 /* If we're in breakpoints-always-inserted mode or if breakpoints
2710 are global across processes, we have to remove them before
2711 disconnecting. */
2712 remove_breakpoints ();
2713
2714 for (t = current_target.beneath; t != NULL; t = t->beneath)
2715 if (t->to_disconnect != NULL)
2716 {
2717 if (targetdebug)
2718 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2719 args, from_tty);
2720 t->to_disconnect (t, args, from_tty);
2721 return;
2722 }
2723
2724 tcomplain ();
2725 }
2726
2727 ptid_t
2728 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2729 {
2730 struct target_ops *t;
2731 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2732 status, options);
2733
2734 if (targetdebug)
2735 {
2736 char *status_string;
2737 char *options_string;
2738
2739 status_string = target_waitstatus_to_string (status);
2740 options_string = target_options_to_string (options);
2741 fprintf_unfiltered (gdb_stdlog,
2742 "target_wait (%d, status, options={%s})"
2743 " = %d, %s\n",
2744 ptid_get_pid (ptid), options_string,
2745 ptid_get_pid (retval), status_string);
2746 xfree (status_string);
2747 xfree (options_string);
2748 }
2749
2750 return retval;
2751 }
2752
2753 char *
2754 target_pid_to_str (ptid_t ptid)
2755 {
2756 struct target_ops *t;
2757
2758 for (t = current_target.beneath; t != NULL; t = t->beneath)
2759 {
2760 if (t->to_pid_to_str != NULL)
2761 return (*t->to_pid_to_str) (t, ptid);
2762 }
2763
2764 return normal_pid_to_str (ptid);
2765 }
2766
2767 char *
2768 target_thread_name (struct thread_info *info)
2769 {
2770 struct target_ops *t;
2771
2772 for (t = current_target.beneath; t != NULL; t = t->beneath)
2773 {
2774 if (t->to_thread_name != NULL)
2775 return (*t->to_thread_name) (t, info);
2776 }
2777
2778 return NULL;
2779 }
2780
2781 void
2782 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2783 {
2784 struct target_ops *t;
2785
2786 target_dcache_invalidate ();
2787
2788 current_target.to_resume (&current_target, ptid, step, signal);
2789 if (targetdebug)
2790 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2791 ptid_get_pid (ptid),
2792 step ? "step" : "continue",
2793 gdb_signal_to_name (signal));
2794
2795 registers_changed_ptid (ptid);
2796 set_executing (ptid, 1);
2797 set_running (ptid, 1);
2798 clear_inline_frame_state (ptid);
2799 }
2800
2801 void
2802 target_pass_signals (int numsigs, unsigned char *pass_signals)
2803 {
2804 struct target_ops *t;
2805
2806 for (t = current_target.beneath; t != NULL; t = t->beneath)
2807 {
2808 if (t->to_pass_signals != NULL)
2809 {
2810 if (targetdebug)
2811 {
2812 int i;
2813
2814 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2815 numsigs);
2816
2817 for (i = 0; i < numsigs; i++)
2818 if (pass_signals[i])
2819 fprintf_unfiltered (gdb_stdlog, " %s",
2820 gdb_signal_to_name (i));
2821
2822 fprintf_unfiltered (gdb_stdlog, " })\n");
2823 }
2824
2825 (*t->to_pass_signals) (t, numsigs, pass_signals);
2826 return;
2827 }
2828 }
2829 }
2830
2831 void
2832 target_program_signals (int numsigs, unsigned char *program_signals)
2833 {
2834 struct target_ops *t;
2835
2836 for (t = current_target.beneath; t != NULL; t = t->beneath)
2837 {
2838 if (t->to_program_signals != NULL)
2839 {
2840 if (targetdebug)
2841 {
2842 int i;
2843
2844 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2845 numsigs);
2846
2847 for (i = 0; i < numsigs; i++)
2848 if (program_signals[i])
2849 fprintf_unfiltered (gdb_stdlog, " %s",
2850 gdb_signal_to_name (i));
2851
2852 fprintf_unfiltered (gdb_stdlog, " })\n");
2853 }
2854
2855 (*t->to_program_signals) (t, numsigs, program_signals);
2856 return;
2857 }
2858 }
2859 }
2860
2861 /* Look through the list of possible targets for a target that can
2862 follow forks. */
2863
2864 int
2865 target_follow_fork (int follow_child, int detach_fork)
2866 {
2867 struct target_ops *t;
2868
2869 for (t = current_target.beneath; t != NULL; t = t->beneath)
2870 {
2871 if (t->to_follow_fork != NULL)
2872 {
2873 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2874
2875 if (targetdebug)
2876 fprintf_unfiltered (gdb_stdlog,
2877 "target_follow_fork (%d, %d) = %d\n",
2878 follow_child, detach_fork, retval);
2879 return retval;
2880 }
2881 }
2882
2883 /* Some target returned a fork event, but did not know how to follow it. */
2884 internal_error (__FILE__, __LINE__,
2885 _("could not find a target to follow fork"));
2886 }
2887
2888 void
2889 target_mourn_inferior (void)
2890 {
2891 struct target_ops *t;
2892
2893 for (t = current_target.beneath; t != NULL; t = t->beneath)
2894 {
2895 if (t->to_mourn_inferior != NULL)
2896 {
2897 t->to_mourn_inferior (t);
2898 if (targetdebug)
2899 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2900
2901 /* We no longer need to keep handles on any of the object files.
2902 Make sure to release them to avoid unnecessarily locking any
2903 of them while we're not actually debugging. */
2904 bfd_cache_close_all ();
2905
2906 return;
2907 }
2908 }
2909
2910 internal_error (__FILE__, __LINE__,
2911 _("could not find a target to follow mourn inferior"));
2912 }
2913
2914 /* Look for a target which can describe architectural features, starting
2915 from TARGET. If we find one, return its description. */
2916
2917 const struct target_desc *
2918 target_read_description (struct target_ops *target)
2919 {
2920 struct target_ops *t;
2921
2922 for (t = target; t != NULL; t = t->beneath)
2923 if (t->to_read_description != NULL)
2924 {
2925 const struct target_desc *tdesc;
2926
2927 tdesc = t->to_read_description (t);
2928 if (tdesc)
2929 return tdesc;
2930 }
2931
2932 return NULL;
2933 }
2934
2935 /* The default implementation of to_search_memory.
2936 This implements a basic search of memory, reading target memory and
2937 performing the search here (as opposed to performing the search in on the
2938 target side with, for example, gdbserver). */
2939
/* The default implementation of to_search_memory: read target memory
   in chunks and scan it on the GDB side with memmem.  Returns 1 and
   sets *FOUND_ADDRP on a match, 0 if not found, -1 on a read error.
   A sliding window of [chunk + pattern_len - 1] bytes guarantees that
   matches straddling a chunk boundary are still seen.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Extra pattern_len - 1 bytes let a match span two chunks.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  /* keep_len is the window overlap; the assert below checks it
	     equals pattern_len - 1 (i.e. the buffer was full-sized).  */
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3042
3043 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3044 sequence of bytes in PATTERN with length PATTERN_LEN.
3045
3046 The result is 1 if found, 0 if not found, and -1 if there was an error
3047 requiring halting of the search (e.g. memory read error).
3048 If the pattern is found the address is recorded in FOUND_ADDRP. */
3049
3050 int
3051 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3052 const gdb_byte *pattern, ULONGEST pattern_len,
3053 CORE_ADDR *found_addrp)
3054 {
3055 struct target_ops *t;
3056 int found;
3057
3058 /* We don't use INHERIT to set current_target.to_search_memory,
3059 so we have to scan the target stack and handle targetdebug
3060 ourselves. */
3061
3062 if (targetdebug)
3063 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3064 hex_string (start_addr));
3065
3066 for (t = current_target.beneath; t != NULL; t = t->beneath)
3067 if (t->to_search_memory != NULL)
3068 break;
3069
3070 if (t != NULL)
3071 {
3072 found = t->to_search_memory (t, start_addr, search_space_len,
3073 pattern, pattern_len, found_addrp);
3074 }
3075 else
3076 {
3077 /* If a special version of to_search_memory isn't available, use the
3078 simple version. */
3079 found = simple_search_memory (current_target.beneath,
3080 start_addr, search_space_len,
3081 pattern, pattern_len, found_addrp);
3082 }
3083
3084 if (targetdebug)
3085 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3086
3087 return found;
3088 }
3089
3090 /* Look through the currently pushed targets. If none of them will
3091 be able to restart the currently running process, issue an error
3092 message. */
3093
3094 void
3095 target_require_runnable (void)
3096 {
3097 struct target_ops *t;
3098
3099 for (t = target_stack; t != NULL; t = t->beneath)
3100 {
3101 /* If this target knows how to create a new program, then
3102 assume we will still be able to after killing the current
3103 one. Either killing and mourning will not pop T, or else
3104 find_default_run_target will find it again. */
3105 if (t->to_create_inferior != NULL)
3106 return;
3107
3108 /* Do not worry about thread_stratum targets that can not
3109 create inferiors. Assume they will be pushed again if
3110 necessary, and continue to the process_stratum. */
3111 if (t->to_stratum == thread_stratum
3112 || t->to_stratum == arch_stratum)
3113 continue;
3114
3115 error (_("The \"%s\" target does not support \"run\". "
3116 "Try \"help target\" or \"continue\"."),
3117 t->to_shortname);
3118 }
3119
3120 /* This function is only called if the target is running. In that
3121 case there should have been a process_stratum target and it
3122 should either know how to create inferiors, or not... */
3123 internal_error (__FILE__, __LINE__, _("No targets found"));
3124 }
3125
3126 /* Look through the list of possible targets for a target that can
3127 execute a run or attach command without any other data. This is
3128 used to locate the default process stratum.
3129
3130 If DO_MESG is not NULL, the result is always valid (error() is
3131 called for errors); else, return NULL on error. */
3132
3133 static struct target_ops *
3134 find_default_run_target (char *do_mesg)
3135 {
3136 struct target_ops **t;
3137 struct target_ops *runable = NULL;
3138 int count;
3139
3140 count = 0;
3141
3142 for (t = target_structs; t < target_structs + target_struct_size;
3143 ++t)
3144 {
3145 if ((*t)->to_can_run && target_can_run (*t))
3146 {
3147 runable = *t;
3148 ++count;
3149 }
3150 }
3151
3152 if (count != 1)
3153 {
3154 if (do_mesg)
3155 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3156 else
3157 return NULL;
3158 }
3159
3160 return runable;
3161 }
3162
3163 void
3164 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3165 {
3166 struct target_ops *t;
3167
3168 t = find_default_run_target ("attach");
3169 (t->to_attach) (t, args, from_tty);
3170 return;
3171 }
3172
3173 void
3174 find_default_create_inferior (struct target_ops *ops,
3175 char *exec_file, char *allargs, char **env,
3176 int from_tty)
3177 {
3178 struct target_ops *t;
3179
3180 t = find_default_run_target ("run");
3181 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3182 return;
3183 }
3184
3185 static int
3186 find_default_can_async_p (struct target_ops *ignore)
3187 {
3188 struct target_ops *t;
3189
3190 /* This may be called before the target is pushed on the stack;
3191 look for the default process stratum. If there's none, gdb isn't
3192 configured with a native debugger, and target remote isn't
3193 connected yet. */
3194 t = find_default_run_target (NULL);
3195 if (t && t->to_can_async_p != delegate_can_async_p)
3196 return (t->to_can_async_p) (t);
3197 return 0;
3198 }
3199
3200 static int
3201 find_default_is_async_p (struct target_ops *ignore)
3202 {
3203 struct target_ops *t;
3204
3205 /* This may be called before the target is pushed on the stack;
3206 look for the default process stratum. If there's none, gdb isn't
3207 configured with a native debugger, and target remote isn't
3208 connected yet. */
3209 t = find_default_run_target (NULL);
3210 if (t && t->to_is_async_p != delegate_is_async_p)
3211 return (t->to_is_async_p) (t);
3212 return 0;
3213 }
3214
3215 static int
3216 find_default_supports_non_stop (struct target_ops *self)
3217 {
3218 struct target_ops *t;
3219
3220 t = find_default_run_target (NULL);
3221 if (t && t->to_supports_non_stop)
3222 return (t->to_supports_non_stop) (t);
3223 return 0;
3224 }
3225
3226 int
3227 target_supports_non_stop (void)
3228 {
3229 struct target_ops *t;
3230
3231 for (t = &current_target; t != NULL; t = t->beneath)
3232 if (t->to_supports_non_stop)
3233 return t->to_supports_non_stop (t);
3234
3235 return 0;
3236 }
3237
3238 /* Implement the "info proc" command. */
3239
3240 int
3241 target_info_proc (char *args, enum info_proc_what what)
3242 {
3243 struct target_ops *t;
3244
3245 /* If we're already connected to something that can get us OS
3246 related data, use it. Otherwise, try using the native
3247 target. */
3248 if (current_target.to_stratum >= process_stratum)
3249 t = current_target.beneath;
3250 else
3251 t = find_default_run_target (NULL);
3252
3253 for (; t != NULL; t = t->beneath)
3254 {
3255 if (t->to_info_proc != NULL)
3256 {
3257 t->to_info_proc (t, args, what);
3258
3259 if (targetdebug)
3260 fprintf_unfiltered (gdb_stdlog,
3261 "target_info_proc (\"%s\", %d)\n", args, what);
3262
3263 return 1;
3264 }
3265 }
3266
3267 return 0;
3268 }
3269
3270 static int
3271 find_default_supports_disable_randomization (struct target_ops *self)
3272 {
3273 struct target_ops *t;
3274
3275 t = find_default_run_target (NULL);
3276 if (t && t->to_supports_disable_randomization)
3277 return (t->to_supports_disable_randomization) (t);
3278 return 0;
3279 }
3280
3281 int
3282 target_supports_disable_randomization (void)
3283 {
3284 struct target_ops *t;
3285
3286 for (t = &current_target; t != NULL; t = t->beneath)
3287 if (t->to_supports_disable_randomization)
3288 return t->to_supports_disable_randomization (t);
3289
3290 return 0;
3291 }
3292
3293 char *
3294 target_get_osdata (const char *type)
3295 {
3296 struct target_ops *t;
3297
3298 /* If we're already connected to something that can get us OS
3299 related data, use it. Otherwise, try using the native
3300 target. */
3301 if (current_target.to_stratum >= process_stratum)
3302 t = current_target.beneath;
3303 else
3304 t = find_default_run_target ("get OS data");
3305
3306 if (!t)
3307 return NULL;
3308
3309 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3310 }
3311
3312 /* Determine the current address space of thread PTID. */
3313
3314 struct address_space *
3315 target_thread_address_space (ptid_t ptid)
3316 {
3317 struct address_space *aspace;
3318 struct inferior *inf;
3319 struct target_ops *t;
3320
3321 for (t = current_target.beneath; t != NULL; t = t->beneath)
3322 {
3323 if (t->to_thread_address_space != NULL)
3324 {
3325 aspace = t->to_thread_address_space (t, ptid);
3326 gdb_assert (aspace);
3327
3328 if (targetdebug)
3329 fprintf_unfiltered (gdb_stdlog,
3330 "target_thread_address_space (%s) = %d\n",
3331 target_pid_to_str (ptid),
3332 address_space_num (aspace));
3333 return aspace;
3334 }
3335 }
3336
3337 /* Fall-back to the "main" address space of the inferior. */
3338 inf = find_inferior_pid (ptid_get_pid (ptid));
3339
3340 if (inf == NULL || inf->aspace == NULL)
3341 internal_error (__FILE__, __LINE__,
3342 _("Can't determine the current "
3343 "address space of thread %s\n"),
3344 target_pid_to_str (ptid));
3345
3346 return inf->aspace;
3347 }
3348
3349
3350 /* Target file operations. */
3351
3352 static struct target_ops *
3353 default_fileio_target (void)
3354 {
3355 /* If we're already connected to something that can perform
3356 file I/O, use it. Otherwise, try using the native target. */
3357 if (current_target.to_stratum >= process_stratum)
3358 return current_target.beneath;
3359 else
3360 return find_default_run_target ("file I/O");
3361 }
3362
3363 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3364 target file descriptor, or -1 if an error occurs (and set
3365 *TARGET_ERRNO). */
3366 int
3367 target_fileio_open (const char *filename, int flags, int mode,
3368 int *target_errno)
3369 {
3370 struct target_ops *t;
3371
3372 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3373 {
3374 if (t->to_fileio_open != NULL)
3375 {
3376 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3377
3378 if (targetdebug)
3379 fprintf_unfiltered (gdb_stdlog,
3380 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3381 filename, flags, mode,
3382 fd, fd != -1 ? 0 : *target_errno);
3383 return fd;
3384 }
3385 }
3386
3387 *target_errno = FILEIO_ENOSYS;
3388 return -1;
3389 }
3390
3391 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3392 Return the number of bytes written, or -1 if an error occurs
3393 (and set *TARGET_ERRNO). */
3394 int
3395 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3396 ULONGEST offset, int *target_errno)
3397 {
3398 struct target_ops *t;
3399
3400 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3401 {
3402 if (t->to_fileio_pwrite != NULL)
3403 {
3404 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3405 target_errno);
3406
3407 if (targetdebug)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "target_fileio_pwrite (%d,...,%d,%s) "
3410 "= %d (%d)\n",
3411 fd, len, pulongest (offset),
3412 ret, ret != -1 ? 0 : *target_errno);
3413 return ret;
3414 }
3415 }
3416
3417 *target_errno = FILEIO_ENOSYS;
3418 return -1;
3419 }
3420
3421 /* Read up to LEN bytes FD on the target into READ_BUF.
3422 Return the number of bytes read, or -1 if an error occurs
3423 (and set *TARGET_ERRNO). */
3424 int
3425 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3426 ULONGEST offset, int *target_errno)
3427 {
3428 struct target_ops *t;
3429
3430 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3431 {
3432 if (t->to_fileio_pread != NULL)
3433 {
3434 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3435 target_errno);
3436
3437 if (targetdebug)
3438 fprintf_unfiltered (gdb_stdlog,
3439 "target_fileio_pread (%d,...,%d,%s) "
3440 "= %d (%d)\n",
3441 fd, len, pulongest (offset),
3442 ret, ret != -1 ? 0 : *target_errno);
3443 return ret;
3444 }
3445 }
3446
3447 *target_errno = FILEIO_ENOSYS;
3448 return -1;
3449 }
3450
3451 /* Close FD on the target. Return 0, or -1 if an error occurs
3452 (and set *TARGET_ERRNO). */
3453 int
3454 target_fileio_close (int fd, int *target_errno)
3455 {
3456 struct target_ops *t;
3457
3458 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3459 {
3460 if (t->to_fileio_close != NULL)
3461 {
3462 int ret = t->to_fileio_close (t, fd, target_errno);
3463
3464 if (targetdebug)
3465 fprintf_unfiltered (gdb_stdlog,
3466 "target_fileio_close (%d) = %d (%d)\n",
3467 fd, ret, ret != -1 ? 0 : *target_errno);
3468 return ret;
3469 }
3470 }
3471
3472 *target_errno = FILEIO_ENOSYS;
3473 return -1;
3474 }
3475
3476 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3477 occurs (and set *TARGET_ERRNO). */
3478 int
3479 target_fileio_unlink (const char *filename, int *target_errno)
3480 {
3481 struct target_ops *t;
3482
3483 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3484 {
3485 if (t->to_fileio_unlink != NULL)
3486 {
3487 int ret = t->to_fileio_unlink (t, filename, target_errno);
3488
3489 if (targetdebug)
3490 fprintf_unfiltered (gdb_stdlog,
3491 "target_fileio_unlink (%s) = %d (%d)\n",
3492 filename, ret, ret != -1 ? 0 : *target_errno);
3493 return ret;
3494 }
3495 }
3496
3497 *target_errno = FILEIO_ENOSYS;
3498 return -1;
3499 }
3500
3501 /* Read value of symbolic link FILENAME on the target. Return a
3502 null-terminated string allocated via xmalloc, or NULL if an error
3503 occurs (and set *TARGET_ERRNO). */
3504 char *
3505 target_fileio_readlink (const char *filename, int *target_errno)
3506 {
3507 struct target_ops *t;
3508
3509 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3510 {
3511 if (t->to_fileio_readlink != NULL)
3512 {
3513 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3514
3515 if (targetdebug)
3516 fprintf_unfiltered (gdb_stdlog,
3517 "target_fileio_readlink (%s) = %s (%d)\n",
3518 filename, ret? ret : "(nil)",
3519 ret? 0 : *target_errno);
3520 return ret;
3521 }
3522 }
3523
3524 *target_errno = FILEIO_ENOSYS;
3525 return NULL;
3526 }
3527
/* Cleanup callback that closes the target file descriptor pointed to
   by OPAQUE, discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3536
3537 /* Read target file FILENAME. Store the result in *BUF_P and
3538 return the size of the transferred data. PADDING additional bytes are
3539 available in *BUF_P. This is a helper function for
3540 target_fileio_read_alloc; see the declaration of that function for more
3541 information. */
3542
static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure FD is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read the next chunk, keeping PADDING bytes of headroom at
	 the end of the buffer for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  NOTE: *BUF_P is left unset on this
	     path; callers must check the return value before using
	     it.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  *BUF_P is only set when at least
	     one byte was transferred; for an empty file the buffer
	     is freed and *BUF_P stays unset.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  Doubling keeps at
	 least half the buffer free for the next read.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3600
3601 /* Read target file FILENAME. Store the result in *BUF_P and return
3602 the size of the transferred data. See the declaration in "target.h"
3603 function for more information about the return value. */
3604
3605 LONGEST
3606 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3607 {
3608 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3609 }
3610
3611 /* Read target file FILENAME. The result is NUL-terminated and
3612 returned as a string, allocated using xmalloc. If an error occurs
3613 or the transfer is unsupported, NULL is returned. Empty objects
3614 are returned as allocated but empty strings. A warning is issued
3615 if the result contains any embedded NUL bytes. */
3616
3617 char *
3618 target_fileio_read_stralloc (const char *filename)
3619 {
3620 gdb_byte *buffer;
3621 char *bufstr;
3622 LONGEST i, transferred;
3623
3624 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3625 bufstr = (char *) buffer;
3626
3627 if (transferred < 0)
3628 return NULL;
3629
3630 if (transferred == 0)
3631 return xstrdup ("");
3632
3633 bufstr[transferred] = 0;
3634
3635 /* Check for embedded NUL bytes; but allow trailing NULs. */
3636 for (i = strlen (bufstr); i < transferred; i++)
3637 if (bufstr[i] != 0)
3638 {
3639 warning (_("target file %s "
3640 "contained unexpected null characters"),
3641 filename);
3642 break;
3643 }
3644
3645 return bufstr;
3646 }
3647
3648
3649 static int
3650 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3651 CORE_ADDR addr, int len)
3652 {
3653 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3654 }
3655
3656 static int
3657 default_watchpoint_addr_within_range (struct target_ops *target,
3658 CORE_ADDR addr,
3659 CORE_ADDR start, int length)
3660 {
3661 return addr >= start && addr < start + length;
3662 }
3663
3664 static struct gdbarch *
3665 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3666 {
3667 return target_gdbarch ();
3668 }
3669
/* Generic stub returning 0; cast into target vector slots (see
   init_dummy_target) that should answer "no" by default.  */

static int
return_zero (void)
{
  return 0;
}
3675
/* Generic stub returning 1, for vector slots that should answer
   "yes" by default.  */

static int
return_one (void)
{
  return 1;
}
3681
/* Generic stub returning -1, for vector slots whose failure value
   is -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3687
/* Generic stub returning a null pointer.  */

static void *
return_null (void)
{
  return 0;
}
3693
3694 /*
3695 * Find the next target down the stack from the specified target.
3696 */
3697
3698 struct target_ops *
3699 find_target_beneath (struct target_ops *t)
3700 {
3701 return t->beneath;
3702 }
3703
3704 /* See target.h. */
3705
3706 struct target_ops *
3707 find_target_at (enum strata stratum)
3708 {
3709 struct target_ops *t;
3710
3711 for (t = current_target.beneath; t != NULL; t = t->beneath)
3712 if (t->to_stratum == stratum)
3713 return t;
3714
3715 return NULL;
3716 }
3717
3718 \f
3719 /* The inferior process has died. Long live the inferior! */
3720
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear the current ptid first; code below must not see
     the dead process as current.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Let any user-registered hook know the inferior is gone.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3753 \f
3754 /* Convert a normal process ID to a string. Returns the string in a
3755 static buffer. */
3756
3757 char *
3758 normal_pid_to_str (ptid_t ptid)
3759 {
3760 static char buf[32];
3761
3762 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3763 return buf;
3764 }
3765
3766 static char *
3767 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3768 {
3769 return normal_pid_to_str (ptid);
3770 }
3771
3772 /* Error-catcher for target_find_memory_regions. */
3773 static int
3774 dummy_find_memory_regions (struct target_ops *self,
3775 find_memory_region_ftype ignore1, void *ignore2)
3776 {
3777 error (_("Command not implemented for this target."));
3778 return 0;
3779 }
3780
3781 /* Error-catcher for target_make_corefile_notes. */
3782 static char *
3783 dummy_make_corefile_notes (struct target_ops *self,
3784 bfd *ignore1, int *ignore2)
3785 {
3786 error (_("Command not implemented for this target."));
3787 return NULL;
3788 }
3789
3790 /* Error-catcher for target_get_bookmark. */
3791 static gdb_byte *
3792 dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
3793 {
3794 tcomplain ();
3795 return NULL;
3796 }
3797
3798 /* Error-catcher for target_goto_bookmark. */
3799 static void
3800 dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
3801 {
3802 tcomplain ();
3803 }
3804
3805 /* Set up the handful of non-empty slots needed by the dummy target
3806 vector. */
3807
static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "attach" and "run" on the dummy target search for a default run
     target to do the real work.  */
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* These methods report "not implemented" explicitly instead of
     silently doing nothing.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The dummy target has nothing: no memory, no stack, no registers,
     no execution.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with its default method.  */
  install_dummy_methods (&dummy_target);
}
3837 \f
3838 static void
3839 debug_to_open (char *args, int from_tty)
3840 {
3841 debug_target.to_open (args, from_tty);
3842
3843 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3844 }
3845
3846 void
3847 target_close (struct target_ops *targ)
3848 {
3849 gdb_assert (!target_is_pushed (targ));
3850
3851 if (targ->to_xclose != NULL)
3852 targ->to_xclose (targ);
3853 else if (targ->to_close != NULL)
3854 targ->to_close (targ);
3855
3856 if (targetdebug)
3857 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3858 }
3859
3860 void
3861 target_attach (char *args, int from_tty)
3862 {
3863 struct target_ops *t;
3864
3865 for (t = current_target.beneath; t != NULL; t = t->beneath)
3866 {
3867 if (t->to_attach != NULL)
3868 {
3869 t->to_attach (t, args, from_tty);
3870 if (targetdebug)
3871 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3872 args, from_tty);
3873 return;
3874 }
3875 }
3876
3877 internal_error (__FILE__, __LINE__,
3878 _("could not find a target to attach"));
3879 }
3880
3881 int
3882 target_thread_alive (ptid_t ptid)
3883 {
3884 struct target_ops *t;
3885
3886 for (t = current_target.beneath; t != NULL; t = t->beneath)
3887 {
3888 if (t->to_thread_alive != NULL)
3889 {
3890 int retval;
3891
3892 retval = t->to_thread_alive (t, ptid);
3893 if (targetdebug)
3894 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3895 ptid_get_pid (ptid), retval);
3896
3897 return retval;
3898 }
3899 }
3900
3901 return 0;
3902 }
3903
3904 void
3905 target_find_new_threads (void)
3906 {
3907 struct target_ops *t;
3908
3909 for (t = current_target.beneath; t != NULL; t = t->beneath)
3910 {
3911 if (t->to_find_new_threads != NULL)
3912 {
3913 t->to_find_new_threads (t);
3914 if (targetdebug)
3915 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3916
3917 return;
3918 }
3919 }
3920 }
3921
3922 void
3923 target_stop (ptid_t ptid)
3924 {
3925 if (!may_stop)
3926 {
3927 warning (_("May not interrupt or stop the target, ignoring attempt"));
3928 return;
3929 }
3930
3931 (*current_target.to_stop) (&current_target, ptid);
3932 }
3933
3934 static void
3935 debug_to_post_attach (struct target_ops *self, int pid)
3936 {
3937 debug_target.to_post_attach (&debug_target, pid);
3938
3939 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3940 }
3941
3942 /* Concatenate ELEM to LIST, a comma separate list, and return the
3943 result. The LIST incoming argument is released. */
3944
3945 static char *
3946 str_comma_list_concat_elem (char *list, const char *elem)
3947 {
3948 if (list == NULL)
3949 return xstrdup (elem);
3950 else
3951 return reconcat (list, list, ", ", elem, (char *) NULL);
3952 }
3953
3954 /* Helper for target_options_to_string. If OPT is present in
3955 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3956 Returns the new resulting string. OPT is removed from
3957 TARGET_OPTIONS. */
3958
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (string version of OPT) to RET and
   clear OPT from TARGET_OPTIONS.  Returns the resulting string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3971
3972 char *
3973 target_options_to_string (int target_options)
3974 {
3975 char *ret = NULL;
3976
3977 #define DO_TARG_OPTION(OPT) \
3978 ret = do_option (&target_options, ret, OPT, #OPT)
3979
3980 DO_TARG_OPTION (TARGET_WNOHANG);
3981
3982 if (target_options != 0)
3983 ret = str_comma_list_concat_elem (ret, "unknown???");
3984
3985 if (ret == NULL)
3986 ret = xstrdup ("");
3987 return ret;
3988 }
3989
/* Log one register access to the debug stream: FUNC, the register's
   name (or number), its raw bytes and, when it fits in a LONGEST,
   its numeric value.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw registers have a value we can dump.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target byte order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* When the value fits in a LONGEST, also print it as hex and
	 decimal.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
4026
4027 void
4028 target_fetch_registers (struct regcache *regcache, int regno)
4029 {
4030 struct target_ops *t;
4031
4032 for (t = current_target.beneath; t != NULL; t = t->beneath)
4033 {
4034 if (t->to_fetch_registers != NULL)
4035 {
4036 t->to_fetch_registers (t, regcache, regno);
4037 if (targetdebug)
4038 debug_print_register ("target_fetch_registers", regcache, regno);
4039 return;
4040 }
4041 }
4042 }
4043
4044 void
4045 target_store_registers (struct regcache *regcache, int regno)
4046 {
4047 struct target_ops *t;
4048
4049 if (!may_write_registers)
4050 error (_("Writing to registers is not allowed (regno %d)"), regno);
4051
4052 current_target.to_store_registers (&current_target, regcache, regno);
4053 if (targetdebug)
4054 {
4055 debug_print_register ("target_store_registers", regcache, regno);
4056 }
4057 }
4058
4059 int
4060 target_core_of_thread (ptid_t ptid)
4061 {
4062 struct target_ops *t;
4063
4064 for (t = current_target.beneath; t != NULL; t = t->beneath)
4065 {
4066 if (t->to_core_of_thread != NULL)
4067 {
4068 int retval = t->to_core_of_thread (t, ptid);
4069
4070 if (targetdebug)
4071 fprintf_unfiltered (gdb_stdlog,
4072 "target_core_of_thread (%d) = %d\n",
4073 ptid_get_pid (ptid), retval);
4074 return retval;
4075 }
4076 }
4077
4078 return -1;
4079 }
4080
4081 int
4082 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4083 {
4084 struct target_ops *t;
4085
4086 for (t = current_target.beneath; t != NULL; t = t->beneath)
4087 {
4088 if (t->to_verify_memory != NULL)
4089 {
4090 int retval = t->to_verify_memory (t, data, memaddr, size);
4091
4092 if (targetdebug)
4093 fprintf_unfiltered (gdb_stdlog,
4094 "target_verify_memory (%s, %s) = %d\n",
4095 paddress (target_gdbarch (), memaddr),
4096 pulongest (size),
4097 retval);
4098 return retval;
4099 }
4100 }
4101
4102 tcomplain ();
4103 }
4104
4105 /* The documentation for this function is in its prototype declaration in
4106 target.h. */
4107
4108 int
4109 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4110 {
4111 struct target_ops *t;
4112
4113 for (t = current_target.beneath; t != NULL; t = t->beneath)
4114 if (t->to_insert_mask_watchpoint != NULL)
4115 {
4116 int ret;
4117
4118 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4119
4120 if (targetdebug)
4121 fprintf_unfiltered (gdb_stdlog, "\
4122 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4123 core_addr_to_string (addr),
4124 core_addr_to_string (mask), rw, ret);
4125
4126 return ret;
4127 }
4128
4129 return 1;
4130 }
4131
4132 /* The documentation for this function is in its prototype declaration in
4133 target.h. */
4134
4135 int
4136 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4137 {
4138 struct target_ops *t;
4139
4140 for (t = current_target.beneath; t != NULL; t = t->beneath)
4141 if (t->to_remove_mask_watchpoint != NULL)
4142 {
4143 int ret;
4144
4145 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4146
4147 if (targetdebug)
4148 fprintf_unfiltered (gdb_stdlog, "\
4149 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4150 core_addr_to_string (addr),
4151 core_addr_to_string (mask), rw, ret);
4152
4153 return ret;
4154 }
4155
4156 return 1;
4157 }
4158
4159 /* The documentation for this function is in its prototype declaration
4160 in target.h. */
4161
4162 int
4163 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4164 {
4165 struct target_ops *t;
4166
4167 for (t = current_target.beneath; t != NULL; t = t->beneath)
4168 if (t->to_masked_watch_num_registers != NULL)
4169 return t->to_masked_watch_num_registers (t, addr, mask);
4170
4171 return -1;
4172 }
4173
4174 /* The documentation for this function is in its prototype declaration
4175 in target.h. */
4176
4177 int
4178 target_ranged_break_num_registers (void)
4179 {
4180 struct target_ops *t;
4181
4182 for (t = current_target.beneath; t != NULL; t = t->beneath)
4183 if (t->to_ranged_break_num_registers != NULL)
4184 return t->to_ranged_break_num_registers (t);
4185
4186 return -1;
4187 }
4188
4189 /* See target.h. */
4190
4191 struct btrace_target_info *
4192 target_enable_btrace (ptid_t ptid)
4193 {
4194 struct target_ops *t;
4195
4196 for (t = current_target.beneath; t != NULL; t = t->beneath)
4197 if (t->to_enable_btrace != NULL)
4198 return t->to_enable_btrace (ptid);
4199
4200 tcomplain ();
4201 return NULL;
4202 }
4203
4204 /* See target.h. */
4205
4206 void
4207 target_disable_btrace (struct btrace_target_info *btinfo)
4208 {
4209 struct target_ops *t;
4210
4211 for (t = current_target.beneath; t != NULL; t = t->beneath)
4212 if (t->to_disable_btrace != NULL)
4213 {
4214 t->to_disable_btrace (btinfo);
4215 return;
4216 }
4217
4218 tcomplain ();
4219 }
4220
4221 /* See target.h. */
4222
4223 void
4224 target_teardown_btrace (struct btrace_target_info *btinfo)
4225 {
4226 struct target_ops *t;
4227
4228 for (t = current_target.beneath; t != NULL; t = t->beneath)
4229 if (t->to_teardown_btrace != NULL)
4230 {
4231 t->to_teardown_btrace (btinfo);
4232 return;
4233 }
4234
4235 tcomplain ();
4236 }
4237
4238 /* See target.h. */
4239
4240 enum btrace_error
4241 target_read_btrace (VEC (btrace_block_s) **btrace,
4242 struct btrace_target_info *btinfo,
4243 enum btrace_read_type type)
4244 {
4245 struct target_ops *t;
4246
4247 for (t = current_target.beneath; t != NULL; t = t->beneath)
4248 if (t->to_read_btrace != NULL)
4249 return t->to_read_btrace (btrace, btinfo, type);
4250
4251 tcomplain ();
4252 return BTRACE_ERR_NOT_SUPPORTED;
4253 }
4254
4255 /* See target.h. */
4256
4257 void
4258 target_stop_recording (void)
4259 {
4260 struct target_ops *t;
4261
4262 for (t = current_target.beneath; t != NULL; t = t->beneath)
4263 if (t->to_stop_recording != NULL)
4264 {
4265 t->to_stop_recording ();
4266 return;
4267 }
4268
4269 /* This is optional. */
4270 }
4271
4272 /* See target.h. */
4273
4274 void
4275 target_info_record (void)
4276 {
4277 struct target_ops *t;
4278
4279 for (t = current_target.beneath; t != NULL; t = t->beneath)
4280 if (t->to_info_record != NULL)
4281 {
4282 t->to_info_record ();
4283 return;
4284 }
4285
4286 tcomplain ();
4287 }
4288
4289 /* See target.h. */
4290
4291 void
4292 target_save_record (const char *filename)
4293 {
4294 struct target_ops *t;
4295
4296 for (t = current_target.beneath; t != NULL; t = t->beneath)
4297 if (t->to_save_record != NULL)
4298 {
4299 t->to_save_record (filename);
4300 return;
4301 }
4302
4303 tcomplain ();
4304 }
4305
4306 /* See target.h. */
4307
4308 int
4309 target_supports_delete_record (void)
4310 {
4311 struct target_ops *t;
4312
4313 for (t = current_target.beneath; t != NULL; t = t->beneath)
4314 if (t->to_delete_record != NULL)
4315 return 1;
4316
4317 return 0;
4318 }
4319
4320 /* See target.h. */
4321
4322 void
4323 target_delete_record (void)
4324 {
4325 struct target_ops *t;
4326
4327 for (t = current_target.beneath; t != NULL; t = t->beneath)
4328 if (t->to_delete_record != NULL)
4329 {
4330 t->to_delete_record ();
4331 return;
4332 }
4333
4334 tcomplain ();
4335 }
4336
4337 /* See target.h. */
4338
4339 int
4340 target_record_is_replaying (void)
4341 {
4342 struct target_ops *t;
4343
4344 for (t = current_target.beneath; t != NULL; t = t->beneath)
4345 if (t->to_record_is_replaying != NULL)
4346 return t->to_record_is_replaying ();
4347
4348 return 0;
4349 }
4350
4351 /* See target.h. */
4352
4353 void
4354 target_goto_record_begin (void)
4355 {
4356 struct target_ops *t;
4357
4358 for (t = current_target.beneath; t != NULL; t = t->beneath)
4359 if (t->to_goto_record_begin != NULL)
4360 {
4361 t->to_goto_record_begin ();
4362 return;
4363 }
4364
4365 tcomplain ();
4366 }
4367
4368 /* See target.h. */
4369
4370 void
4371 target_goto_record_end (void)
4372 {
4373 struct target_ops *t;
4374
4375 for (t = current_target.beneath; t != NULL; t = t->beneath)
4376 if (t->to_goto_record_end != NULL)
4377 {
4378 t->to_goto_record_end ();
4379 return;
4380 }
4381
4382 tcomplain ();
4383 }
4384
4385 /* See target.h. */
4386
4387 void
4388 target_goto_record (ULONGEST insn)
4389 {
4390 struct target_ops *t;
4391
4392 for (t = current_target.beneath; t != NULL; t = t->beneath)
4393 if (t->to_goto_record != NULL)
4394 {
4395 t->to_goto_record (insn);
4396 return;
4397 }
4398
4399 tcomplain ();
4400 }
4401
4402 /* See target.h. */
4403
4404 void
4405 target_insn_history (int size, int flags)
4406 {
4407 struct target_ops *t;
4408
4409 for (t = current_target.beneath; t != NULL; t = t->beneath)
4410 if (t->to_insn_history != NULL)
4411 {
4412 t->to_insn_history (size, flags);
4413 return;
4414 }
4415
4416 tcomplain ();
4417 }
4418
4419 /* See target.h. */
4420
4421 void
4422 target_insn_history_from (ULONGEST from, int size, int flags)
4423 {
4424 struct target_ops *t;
4425
4426 for (t = current_target.beneath; t != NULL; t = t->beneath)
4427 if (t->to_insn_history_from != NULL)
4428 {
4429 t->to_insn_history_from (from, size, flags);
4430 return;
4431 }
4432
4433 tcomplain ();
4434 }
4435
4436 /* See target.h. */
4437
4438 void
4439 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4440 {
4441 struct target_ops *t;
4442
4443 for (t = current_target.beneath; t != NULL; t = t->beneath)
4444 if (t->to_insn_history_range != NULL)
4445 {
4446 t->to_insn_history_range (begin, end, flags);
4447 return;
4448 }
4449
4450 tcomplain ();
4451 }
4452
4453 /* See target.h. */
4454
4455 void
4456 target_call_history (int size, int flags)
4457 {
4458 struct target_ops *t;
4459
4460 for (t = current_target.beneath; t != NULL; t = t->beneath)
4461 if (t->to_call_history != NULL)
4462 {
4463 t->to_call_history (size, flags);
4464 return;
4465 }
4466
4467 tcomplain ();
4468 }
4469
4470 /* See target.h. */
4471
4472 void
4473 target_call_history_from (ULONGEST begin, int size, int flags)
4474 {
4475 struct target_ops *t;
4476
4477 for (t = current_target.beneath; t != NULL; t = t->beneath)
4478 if (t->to_call_history_from != NULL)
4479 {
4480 t->to_call_history_from (begin, size, flags);
4481 return;
4482 }
4483
4484 tcomplain ();
4485 }
4486
4487 /* See target.h. */
4488
4489 void
4490 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4491 {
4492 struct target_ops *t;
4493
4494 for (t = current_target.beneath; t != NULL; t = t->beneath)
4495 if (t->to_call_history_range != NULL)
4496 {
4497 t->to_call_history_range (begin, end, flags);
4498 return;
4499 }
4500
4501 tcomplain ();
4502 }
4503
/* Debug wrapper: forward prepare_to_store to the real target and log
   the call to gdb_stdlog.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4511
4512 /* See target.h. */
4513
4514 const struct frame_unwind *
4515 target_get_unwinder (void)
4516 {
4517 struct target_ops *t;
4518
4519 for (t = current_target.beneath; t != NULL; t = t->beneath)
4520 if (t->to_get_unwinder != NULL)
4521 return t->to_get_unwinder;
4522
4523 return NULL;
4524 }
4525
4526 /* See target.h. */
4527
4528 const struct frame_unwind *
4529 target_get_tailcall_unwinder (void)
4530 {
4531 struct target_ops *t;
4532
4533 for (t = current_target.beneath; t != NULL; t = t->beneath)
4534 if (t->to_get_tailcall_unwinder != NULL)
4535 return t->to_get_tailcall_unwinder;
4536
4537 return NULL;
4538 }
4539
4540 /* See target.h. */
4541
4542 CORE_ADDR
4543 forward_target_decr_pc_after_break (struct target_ops *ops,
4544 struct gdbarch *gdbarch)
4545 {
4546 for (; ops != NULL; ops = ops->beneath)
4547 if (ops->to_decr_pc_after_break != NULL)
4548 return ops->to_decr_pc_after_break (ops, gdbarch);
4549
4550 return gdbarch_decr_pc_after_break (gdbarch);
4551 }
4552
/* See target.h.

   Convenience entry point: start the search at the top of the current
   target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4560
/* Debug wrapper for the deprecated memory-transfer hook.  Forwards to
   the real target's method, logs the request and its result, and, when
   bytes were transferred, dumps them in hex (bounded unless
   "set debug target" is >= 2).  Returns the transfer count reported by
   the underlying target.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into lines each time the buffer address is
	     16-byte aligned.  At debug level 1, only the first line is
	     printed; "..." marks the truncation point.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4601
/* Debug wrapper: forward files_info to the real target and log the
   call.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4609
4610 static int
4611 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4612 struct bp_target_info *bp_tgt)
4613 {
4614 int retval;
4615
4616 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4617
4618 fprintf_unfiltered (gdb_stdlog,
4619 "target_insert_breakpoint (%s, xxx) = %ld\n",
4620 core_addr_to_string (bp_tgt->placed_address),
4621 (unsigned long) retval);
4622 return retval;
4623 }
4624
4625 static int
4626 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4627 struct bp_target_info *bp_tgt)
4628 {
4629 int retval;
4630
4631 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4632
4633 fprintf_unfiltered (gdb_stdlog,
4634 "target_remove_breakpoint (%s, xxx) = %ld\n",
4635 core_addr_to_string (bp_tgt->placed_address),
4636 (unsigned long) retval);
4637 return retval;
4638 }
4639
4640 static int
4641 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4642 int type, int cnt, int from_tty)
4643 {
4644 int retval;
4645
4646 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4647 type, cnt, from_tty);
4648
4649 fprintf_unfiltered (gdb_stdlog,
4650 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4651 (unsigned long) type,
4652 (unsigned long) cnt,
4653 (unsigned long) from_tty,
4654 (unsigned long) retval);
4655 return retval;
4656 }
4657
4658 static int
4659 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4660 CORE_ADDR addr, int len)
4661 {
4662 CORE_ADDR retval;
4663
4664 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4665 addr, len);
4666
4667 fprintf_unfiltered (gdb_stdlog,
4668 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4669 core_addr_to_string (addr), (unsigned long) len,
4670 core_addr_to_string (retval));
4671 return retval;
4672 }
4673
4674 static int
4675 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4676 CORE_ADDR addr, int len, int rw,
4677 struct expression *cond)
4678 {
4679 int retval;
4680
4681 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4682 addr, len,
4683 rw, cond);
4684
4685 fprintf_unfiltered (gdb_stdlog,
4686 "target_can_accel_watchpoint_condition "
4687 "(%s, %d, %d, %s) = %ld\n",
4688 core_addr_to_string (addr), len, rw,
4689 host_address_to_string (cond), (unsigned long) retval);
4690 return retval;
4691 }
4692
4693 static int
4694 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4695 {
4696 int retval;
4697
4698 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4699
4700 fprintf_unfiltered (gdb_stdlog,
4701 "target_stopped_by_watchpoint () = %ld\n",
4702 (unsigned long) retval);
4703 return retval;
4704 }
4705
4706 static int
4707 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4708 {
4709 int retval;
4710
4711 retval = debug_target.to_stopped_data_address (target, addr);
4712
4713 fprintf_unfiltered (gdb_stdlog,
4714 "target_stopped_data_address ([%s]) = %ld\n",
4715 core_addr_to_string (*addr),
4716 (unsigned long)retval);
4717 return retval;
4718 }
4719
4720 static int
4721 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4722 CORE_ADDR addr,
4723 CORE_ADDR start, int length)
4724 {
4725 int retval;
4726
4727 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4728 start, length);
4729
4730 fprintf_filtered (gdb_stdlog,
4731 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4732 core_addr_to_string (addr), core_addr_to_string (start),
4733 length, retval);
4734 return retval;
4735 }
4736
4737 static int
4738 debug_to_insert_hw_breakpoint (struct target_ops *self,
4739 struct gdbarch *gdbarch,
4740 struct bp_target_info *bp_tgt)
4741 {
4742 int retval;
4743
4744 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4745 gdbarch, bp_tgt);
4746
4747 fprintf_unfiltered (gdb_stdlog,
4748 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4749 core_addr_to_string (bp_tgt->placed_address),
4750 (unsigned long) retval);
4751 return retval;
4752 }
4753
4754 static int
4755 debug_to_remove_hw_breakpoint (struct target_ops *self,
4756 struct gdbarch *gdbarch,
4757 struct bp_target_info *bp_tgt)
4758 {
4759 int retval;
4760
4761 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4762 gdbarch, bp_tgt);
4763
4764 fprintf_unfiltered (gdb_stdlog,
4765 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4766 core_addr_to_string (bp_tgt->placed_address),
4767 (unsigned long) retval);
4768 return retval;
4769 }
4770
4771 static int
4772 debug_to_insert_watchpoint (struct target_ops *self,
4773 CORE_ADDR addr, int len, int type,
4774 struct expression *cond)
4775 {
4776 int retval;
4777
4778 retval = debug_target.to_insert_watchpoint (&debug_target,
4779 addr, len, type, cond);
4780
4781 fprintf_unfiltered (gdb_stdlog,
4782 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4783 core_addr_to_string (addr), len, type,
4784 host_address_to_string (cond), (unsigned long) retval);
4785 return retval;
4786 }
4787
4788 static int
4789 debug_to_remove_watchpoint (struct target_ops *self,
4790 CORE_ADDR addr, int len, int type,
4791 struct expression *cond)
4792 {
4793 int retval;
4794
4795 retval = debug_target.to_remove_watchpoint (&debug_target,
4796 addr, len, type, cond);
4797
4798 fprintf_unfiltered (gdb_stdlog,
4799 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4800 core_addr_to_string (addr), len, type,
4801 host_address_to_string (cond), (unsigned long) retval);
4802 return retval;
4803 }
4804
/* Debug wrapper: forward terminal_init to the real target and log the
   call.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4812
/* Debug wrapper: forward terminal_inferior to the real target and log
   the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4820
/* Debug wrapper: forward terminal_ours_for_output to the real target
   and log the call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4828
/* Debug wrapper: forward terminal_ours to the real target and log the
   call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4836
/* Debug wrapper: forward terminal_save_ours to the real target and log
   the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4844
/* Debug wrapper: forward terminal_info to the real target and log the
   arguments.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4854
/* Debug wrapper: forward load to the real target and log the
   arguments.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4862
/* Debug wrapper: forward post_startup_inferior to the real target and
   log the pid of PTID.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4871
4872 static int
4873 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4874 {
4875 int retval;
4876
4877 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4878
4879 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4880 pid, retval);
4881
4882 return retval;
4883 }
4884
4885 static int
4886 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4887 {
4888 int retval;
4889
4890 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4891
4892 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4893 pid, retval);
4894
4895 return retval;
4896 }
4897
4898 static int
4899 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4900 {
4901 int retval;
4902
4903 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4904
4905 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4906 pid, retval);
4907
4908 return retval;
4909 }
4910
4911 static int
4912 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4913 {
4914 int retval;
4915
4916 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4917
4918 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4919 pid, retval);
4920
4921 return retval;
4922 }
4923
4924 static int
4925 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4926 {
4927 int retval;
4928
4929 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4930
4931 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4932 pid, retval);
4933
4934 return retval;
4935 }
4936
4937 static int
4938 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4939 {
4940 int retval;
4941
4942 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4943
4944 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4945 pid, retval);
4946
4947 return retval;
4948 }
4949
4950 static int
4951 debug_to_has_exited (struct target_ops *self,
4952 int pid, int wait_status, int *exit_status)
4953 {
4954 int has_exited;
4955
4956 has_exited = debug_target.to_has_exited (&debug_target,
4957 pid, wait_status, exit_status);
4958
4959 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4960 pid, wait_status, *exit_status, has_exited);
4961
4962 return has_exited;
4963 }
4964
4965 static int
4966 debug_to_can_run (struct target_ops *self)
4967 {
4968 int retval;
4969
4970 retval = debug_target.to_can_run (&debug_target);
4971
4972 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4973
4974 return retval;
4975 }
4976
4977 static struct gdbarch *
4978 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4979 {
4980 struct gdbarch *retval;
4981
4982 retval = debug_target.to_thread_architecture (ops, ptid);
4983
4984 fprintf_unfiltered (gdb_stdlog,
4985 "target_thread_architecture (%s) = %s [%s]\n",
4986 target_pid_to_str (ptid),
4987 host_address_to_string (retval),
4988 gdbarch_bfd_arch_info (retval)->printable_name);
4989 return retval;
4990 }
4991
/* Debug wrapper: forward stop to the real target and log the thread
   being stopped.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
5000
/* Debug wrapper: forward a monitor command to the real target and log
   it.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
5008
5009 static char *
5010 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
5011 {
5012 char *exec_file;
5013
5014 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
5015
5016 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
5017 pid, exec_file);
5018
5019 return exec_file;
5020 }
5021
/* Install the debug wrappers: snapshot the current target vector into
   DEBUG_TARGET, then point the methods of CURRENT_TARGET at the
   debug_to_* wrappers above, which forward to the snapshot and log
   each call.  Called when "set debug target" is enabled.  Only the
   methods listed here are wrapped; the rest keep their snapshot
   behavior via the memcpy.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5069 \f
5070
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5075
/* Implement the "monitor" command: pass CMD through to the current
   target's to_rcmd method.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  /* Detect targets that do not really implement to_rcmd: either the
     slot still holds the default tcomplain stub, or it holds the
     debug wrapper while the wrapped (real) target's slot holds the
     stub.  The casts are needed because tcomplain's prototype differs
     from the method's.  */
  if ((current_target.to_rcmd
       == (void (*) (struct target_ops *, char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (struct target_ops *,
			    char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
5089
5090 /* Print the name of each layers of our target stack. */
5091
5092 static void
5093 maintenance_print_target_stack (char *cmd, int from_tty)
5094 {
5095 struct target_ops *t;
5096
5097 printf_filtered (_("The current target stack is:\n"));
5098
5099 for (t = target_stack; t != NULL; t = t->beneath)
5100 {
5101 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5102 }
5103 }
5104
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the staging
   value is copied over only once no inferior is running (see
   set_target_async_command below).  */
static int target_async_permitted_1 = 0;
5111
/* "set target-async" callback: refuse to change the setting while an
   inferior is live, otherwise copy the user's staged value into
   target_async_permitted.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staging variable, then complain.
	 error () performs a non-local exit, so the assignment below
	 is never reached in this case.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5124
/* "show target-async" callback: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5134
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the real may_* globals are updated from these staging
   variables only when no inferior is executing (see
   set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5143
5144 /* Make the user-set values match the real values again. */
5145
5146 void
5147 update_target_permissions (void)
5148 {
5149 may_write_registers_1 = may_write_registers;
5150 may_write_memory_1 = may_write_memory;
5151 may_insert_breakpoints_1 = may_insert_breakpoints;
5152 may_insert_tracepoints_1 = may_insert_tracepoints;
5153 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5154 may_stop_1 = may_stop;
5155 }
5156
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staging variables, then complain; error () does a
	 non-local exit, so nothing below runs in this case.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.
     may_write_memory is intentionally not copied here: it is handled
     separately by set_write_memory_permission, independent of
     observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5178
/* Set memory write permission independently of observer mode.  Unlike
   the other may-* settings, this one may be changed even while the
   inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5189
5190
/* Module initialization: push the dummy target as the bottom of the
   target stack and register all target-related commands and
   settings.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a current target.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" settings below all stage their value in a *_1
     variable; the set callbacks copy it into the real flag when
     allowed (see set_target_permissions).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.167545 seconds and 5 git commands to generate.