convert to_can_use_hw_breakpoint
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 static void *return_null (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int find_default_can_async_p (struct target_ops *ignore);
84
85 static int find_default_is_async_p (struct target_ops *ignore);
86
87 #include "target-delegates.c"
88
89 static void init_dummy_target (void);
90
91 static struct target_ops debug_target;
92
93 static void debug_to_open (char *, int);
94
95 static void debug_to_prepare_to_store (struct target_ops *self,
96 struct regcache *);
97
98 static void debug_to_files_info (struct target_ops *);
99
100 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
107 int, int, int);
108
109 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
110 struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
114 struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (struct target_ops *self,
118 CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (struct target_ops *self,
122 CORE_ADDR, int, int,
123 struct expression *);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
131 CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
134 CORE_ADDR, int, int,
135 struct expression *);
136
137 static void debug_to_terminal_init (struct target_ops *self);
138
139 static void debug_to_terminal_inferior (struct target_ops *self);
140
141 static void debug_to_terminal_ours_for_output (struct target_ops *self);
142
143 static void debug_to_terminal_save_ours (struct target_ops *self);
144
145 static void debug_to_terminal_ours (struct target_ops *self);
146
147 static void debug_to_load (struct target_ops *self, char *, int);
148
149 static int debug_to_can_run (struct target_ops *self);
150
151 static void debug_to_stop (struct target_ops *self, ptid_t);
152
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of TARGET_STRUCTS; doubled on demand by
   add_target_with_completer.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertant disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implementation of "show debug target": print the current value of
   the targetdebug setting.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
214
215 static void setup_target_debug (void);
216
217 /* The user just typed 'target' without the name of a target. */
218
static void
target_command (char *arg, int from_tty)
{
  /* The bare "target" prefix command does nothing by itself; point
     the user at the per-protocol subcommands.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
225
226 /* Default target_has_* methods for process_stratum targets. */
227
228 int
229 default_child_has_all_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_stack (struct target_ops *ops)
250 {
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_registers (struct target_ops *ops)
260 {
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
270 {
271 /* If there's no thread selected, then we can't make it run through
272 hoops. */
273 if (ptid_equal (the_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279
280 int
281 target_has_all_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_all_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_memory_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_memory (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_stack_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_stack (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_registers_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_registers (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_1 (ptid_t the_ptid)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_execution (t, the_ptid))
335 return 1;
336
337 return 0;
338 }
339
/* Like target_has_execution_1, but for the current inferior's
   ptid.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
345
346 /* Complete initialization of T. This ensures that various fields in
347 T are set, if needed by the target implementation. */
348
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* return_zero is declared as taking no arguments; the casts below
     adapt it to each predicate's signature so unset "has_*" methods
     simply answer "no".  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill any remaining NULL methods with their delegators (see
     target-delegates.c).  */
  install_delegators (t);
}
373
374 /* Add possible target architecture T to the list and add a new
375 command 'target T->to_shortname'. Set COMPLETER as the command's
376 completer if not NULL. */
377
378 void
379 add_target_with_completer (struct target_ops *t,
380 completer_ftype *completer)
381 {
382 struct cmd_list_element *c;
383
384 complete_target_initialization (t);
385
386 if (!target_structs)
387 {
388 target_struct_allocsize = DEFAULT_ALLOCSIZE;
389 target_structs = (struct target_ops **) xmalloc
390 (target_struct_allocsize * sizeof (*target_structs));
391 }
392 if (target_struct_size >= target_struct_allocsize)
393 {
394 target_struct_allocsize *= 2;
395 target_structs = (struct target_ops **)
396 xrealloc ((char *) target_structs,
397 target_struct_allocsize * sizeof (*target_structs));
398 }
399 target_structs[target_struct_size++] = t;
400
401 if (targetlist == NULL)
402 add_prefix_cmd ("target", class_run, target_command, _("\
403 Connect to a target machine or process.\n\
404 The first argument is the type or protocol of the target machine.\n\
405 Remaining arguments are interpreted by the target protocol. For more\n\
406 information on the arguments for a particular protocol, type\n\
407 `help target ' followed by the protocol name."),
408 &targetlist, "target ", 0, &cmdlist);
409 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
410 &targetlist);
411 if (completer != NULL)
412 set_cmd_completer (c, completer);
413 }
414
415 /* Add a possible target architecture to the list. */
416
void
add_target (struct target_ops *t)
{
  /* Register T with no command-line completer.  */
  add_target_with_completer (t, NULL);
}
422
423 /* See target.h. */
424
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* NOTE(review): ALT is not freed here -- presumably deprecate_cmd
     keeps the pointer for the deprecation message; confirm before
     changing ownership.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
437
438 /* Stub functions */
439
/* Do-nothing stub, installed as the default for target methods whose
   absence can be silently ignored.  */

void
target_ignore (void)
{
}
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so drop any cached data first.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
/* Hand the terminal over to the inferior when it is resumed in the
   foreground; background resumes leave GDB in control.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
507
/* Default deprecated_xfer_memory callback: refuse every transfer,
   setting errno to EIO and reporting zero bytes handled.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
515
/* Throw an error saying the current target cannot do the requested
   operation.  Used as the default for unimplementable methods.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
522
/* Throw an error saying the operation needs a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
528
/* Default to_terminal_info implementation: there is nothing saved to
   report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
534
535 /* A default implementation for the to_get_ada_task_ptid target method.
536
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
539 inferior_ptid. */
540
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the current inferior's pid; LWP and TID are supplied by
     the Ada tasking layer.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
546
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops *self)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
562 vectors.
563
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
567 Consequently, new new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
569 request. */
570
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target unless an entry
     higher on the stack already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (struct target_ops *, char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
951
952 /* Push a new target type into the stack of the existing target accessors,
953 possibly superseding some of the existing accessors.
954
955 Rather than allow an empty stack, we always have the dummy target at
956 the bottom stratum, so we can call the function vectors without
957 checking them. */
958
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by descending stratum, so stop at the first entry at
     or below T's stratum.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the updated stack.  */
  update_current_target ();
}
1002
1003 /* Remove a target_ops vector from the stack, wherever it may be.
1004 Return how many times it was removed (0 or 1). */
1005
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1044
1045 void
1046 pop_all_targets_above (enum strata above_stratum)
1047 {
1048 while ((int) (current_target.to_stratum) > (int) above_stratum)
1049 {
1050 if (!unpush_target (target_stack))
1051 {
1052 fprintf_unfiltered (gdb_stderr,
1053 "pop_all_targets couldn't find target %s\n",
1054 target_stack->to_shortname);
1055 internal_error (__FILE__, __LINE__,
1056 _("failed internal consistency check"));
1057 break;
1058 }
1059 }
1060 }
1061
/* Unpush every target on the stack, leaving only the dummy target,
   which sits at dummy_stratum and can never be unpushed.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1067
1068 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1069
1070 int
1071 target_is_pushed (struct target_ops *t)
1072 {
1073 struct target_ops **cur;
1074
1075 /* Check magic number. If wrong, it probably means someone changed
1076 the struct definition, but not all the places that initialize one. */
1077 if (t->to_magic != OPS_MAGIC)
1078 {
1079 fprintf_unfiltered (gdb_stderr,
1080 "Magic number of %s target struct wrong\n",
1081 t->to_shortname);
1082 internal_error (__FILE__, __LINE__,
1083 _("failed internal consistency check"));
1084 }
1085
1086 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1087 if (*cur == t)
1088 return 1;
1089
1090 return 0;
1091 }
1092
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR must be volatile: it is assigned inside TRY_CATCH and read
     after it, and TRY_CATCH is built on setjmp/longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher
		 catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1189
1190 const char *
1191 target_xfer_status_to_string (enum target_xfer_status err)
1192 {
1193 #define CASE(X) case X: return #X
1194 switch (err)
1195 {
1196 CASE(TARGET_XFER_E_IO);
1197 CASE(TARGET_XFER_E_UNAVAILABLE);
1198 default:
1199 return "<unknown>";
1200 }
1201 #undef CASE
1202 };
1203
1204
/* Local minimum-of-two helper used by target_read_string below;
   #undef first to be safe against any earlier definition.  */
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1207
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* One aligned word of target memory.  */
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;
  char *bufptr;			/* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read one aligned 4-byte word at a time; TLEN is how many of
	 its bytes belong to the requested range.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this word won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at the first NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* BUFFER is returned even on error; the caller always frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1280
1281 struct target_section_table *
1282 target_get_section_table (struct target_ops *target)
1283 {
1284 struct target_ops *t;
1285
1286 if (targetdebug)
1287 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1288
1289 for (t = target; t != NULL; t = t->beneath)
1290 if (t->to_get_section_table != NULL)
1291 return (*t->to_get_section_table) (t);
1292
1293 return NULL;
1294 }
1295
1296 /* Find a section containing ADDR. */
1297
1298 struct target_section *
1299 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1300 {
1301 struct target_section_table *table = target_get_section_table (target);
1302 struct target_section *secp;
1303
1304 if (table == NULL)
1305 return NULL;
1306
1307 for (secp = table->sections; secp < table->sections_end; secp++)
1308 {
1309 if (addr >= secp->addr && addr < secp->endaddr)
1310 return secp;
1311 }
1312 return NULL;
1313 }
1314
1315 /* Read memory from the live target, even if currently inspecting a
1316 traceframe. The return is the same as that of target_read. */
1317
1318 static enum target_xfer_status
1319 target_read_live_memory (enum target_object object,
1320 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1321 ULONGEST *xfered_len)
1322 {
1323 enum target_xfer_status ret;
1324 struct cleanup *cleanup;
1325
1326 /* Switch momentarily out of tfind mode so to access live memory.
1327 Note that this must not clear global state, such as the frame
1328 cache, which must still remain valid for the previous traceframe.
1329 We may be _building_ the frame cache at this point. */
1330 cleanup = make_cleanup_restore_traceframe_number ();
1331 set_traceframe_number (-1);
1332
1333 ret = target_xfer_partial (current_target.beneath, object, NULL,
1334 myaddr, NULL, memaddr, len, xfered_len);
1335
1336 do_cleanups (cleanup);
1337 return ret;
1338 }
1339
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lies inside a section flagged
     SEC_READONLY.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR is not in a live read-only section; report EOF.  */
  return TARGET_XFER_EOF;
}
1395
1396 /* Read memory from more than one valid target. A core file, for
1397 instance, could have some of memory but delegate other bits to
1398 the target below it. So, we must manually try all targets. */
1399
1400 static enum target_xfer_status
1401 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1402 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1403 ULONGEST *xfered_len)
1404 {
1405 enum target_xfer_status res;
1406
1407 do
1408 {
1409 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1410 readbuf, writebuf, memaddr, len,
1411 xfered_len);
1412 if (res == TARGET_XFER_OK)
1413 break;
1414
1415 /* Stop if the target reports that the memory is not available. */
1416 if (res == TARGET_XFER_E_UNAVAILABLE)
1417 break;
1418
1419 /* We want to continue past core files to executables, but not
1420 past a running target's memory. */
1421 if (ops->to_has_all_memory (ops))
1422 break;
1423
1424 ops = ops->beneath;
1425 }
1426 while (ops != NULL);
1427
1428 return res;
1429 }
1430
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The request is satisfied from the first of these sources that
   applies, in order: unmapped overlay sections, trusted read-only
   executable sections, the traceframe's available memory, GDB's
   data cache, and finally the target stack itself.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clamp the request
     so it does not cross the region boundary.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* The dcache is keyed per-inferior; look it up if there is one.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1641
1642 /* Perform a partial memory transfer. For docs see target.h,
1643 to_xfer_partial. */
1644
1645 static enum target_xfer_status
1646 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1647 gdb_byte *readbuf, const gdb_byte *writebuf,
1648 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1649 {
1650 enum target_xfer_status res;
1651
1652 /* Zero length requests are ok and require no work. */
1653 if (len == 0)
1654 return TARGET_XFER_EOF;
1655
1656 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1657 breakpoint insns, thus hiding out from higher layers whether
1658 there are software breakpoints inserted in the code stream. */
1659 if (readbuf != NULL)
1660 {
1661 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1662 xfered_len);
1663
1664 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1665 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1666 }
1667 else
1668 {
1669 void *buf;
1670 struct cleanup *old_chain;
1671
1672 /* A large write request is likely to be partially satisfied
1673 by memory_xfer_partial_1. We will continually malloc
1674 and free a copy of the entire write request for breakpoint
1675 shadow handling even though we only end up writing a small
1676 subset of it. Cap writes to 4KB to mitigate this. */
1677 len = min (4096, len);
1678
1679 buf = xmalloc (len);
1680 old_chain = make_cleanup (xfree, buf);
1681 memcpy (buf, writebuf, len);
1682
1683 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1684 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1685 xfered_len);
1686
1687 do_cleanups (old_chain);
1688 }
1689
1690 return res;
1691 }
1692
/* Cleanup callback: restore show_memory_breakpoints from the value
   stashed (as a uintptr_t) in ARG by
   make_show_memory_breakpoints_cleanup.  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1698
1699 struct cleanup *
1700 make_show_memory_breakpoints_cleanup (int show)
1701 {
1702 int current = show_memory_breakpoints;
1703
1704 show_memory_breakpoints = show;
1705 return make_cleanup (restore_show_memory_breakpoints,
1706 (void *) (uintptr_t) current);
1707 }
1708
/* For docs see target.h, to_xfer_partial.  Central dispatcher: routes
   memory objects through the memory-specific code, everything else
   straight to OPS, and optionally dumps the transfer when target
   debugging is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the dump into 16-byte lines.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  /* Abbreviate after the first line unless "set debug
		     target" is at least 2.  */
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1801
1802 /* Read LEN bytes of target memory at address MEMADDR, placing the
1803 results in GDB's memory at MYADDR. Returns either 0 for success or
1804 TARGET_XFER_E_IO if any error occurs.
1805
1806 If an error occurs, no guarantee is made about the contents of the data at
1807 MYADDR. In particular, the caller should not depend upon partial reads
1808 filling the buffer with good data. There is no way for the caller to know
1809 how much good data might have been transfered anyway. Callers that can
1810 deal with partial reads should call target_read (which will retry until
1811 it makes no progress, and then return how much was transferred). */
1812
1813 int
1814 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1815 {
1816 /* Dispatch to the topmost target, not the flattened current_target.
1817 Memory accesses check target->to_has_(all_)memory, and the
1818 flattened target doesn't inherit those. */
1819 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1820 myaddr, memaddr, len) == len)
1821 return 0;
1822 else
1823 return TARGET_XFER_E_IO;
1824 }
1825
1826 /* Like target_read_memory, but specify explicitly that this is a read
1827 from the target's raw memory. That is, this read bypasses the
1828 dcache, breakpoint shadowing, etc. */
1829
1830 int
1831 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1832 {
1833 /* See comment in target_read_memory about why the request starts at
1834 current_target.beneath. */
1835 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1836 myaddr, memaddr, len) == len)
1837 return 0;
1838 else
1839 return TARGET_XFER_E_IO;
1840 }
1841
1842 /* Like target_read_memory, but specify explicitly that this is a read from
1843 the target's stack. This may trigger different cache behavior. */
1844
1845 int
1846 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1847 {
1848 /* See comment in target_read_memory about why the request starts at
1849 current_target.beneath. */
1850 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1851 myaddr, memaddr, len) == len)
1852 return 0;
1853 else
1854 return TARGET_XFER_E_IO;
1855 }
1856
1857 /* Like target_read_memory, but specify explicitly that this is a read from
1858 the target's code. This may trigger different cache behavior. */
1859
1860 int
1861 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1862 {
1863 /* See comment in target_read_memory about why the request starts at
1864 current_target.beneath. */
1865 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1866 myaddr, memaddr, len) == len)
1867 return 0;
1868 else
1869 return TARGET_XFER_E_IO;
1870 }
1871
1872 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1873 Returns either 0 for success or TARGET_XFER_E_IO if any
1874 error occurs. If an error occurs, no guarantee is made about how
1875 much data got written. Callers that can deal with partial writes
1876 should call target_write. */
1877
1878 int
1879 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1880 {
1881 /* See comment in target_read_memory about why the request starts at
1882 current_target.beneath. */
1883 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1884 myaddr, memaddr, len) == len)
1885 return 0;
1886 else
1887 return TARGET_XFER_E_IO;
1888 }
1889
1890 /* Write LEN bytes from MYADDR to target raw memory at address
1891 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1892 if any error occurs. If an error occurs, no guarantee is made
1893 about how much data got written. Callers that can deal with
1894 partial writes should call target_write. */
1895
1896 int
1897 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1898 {
1899 /* See comment in target_read_memory about why the request starts at
1900 current_target.beneath. */
1901 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1902 myaddr, memaddr, len) == len)
1903 return 0;
1904 else
1905 return TARGET_XFER_E_IO;
1906 }
1907
1908 /* Fetch the target's memory map. */
1909
1910 VEC(mem_region_s) *
1911 target_memory_map (void)
1912 {
1913 VEC(mem_region_s) *result;
1914 struct mem_region *last_one, *this_one;
1915 int ix;
1916 struct target_ops *t;
1917
1918 if (targetdebug)
1919 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1920
1921 for (t = current_target.beneath; t != NULL; t = t->beneath)
1922 if (t->to_memory_map != NULL)
1923 break;
1924
1925 if (t == NULL)
1926 return NULL;
1927
1928 result = t->to_memory_map (t);
1929 if (result == NULL)
1930 return NULL;
1931
1932 qsort (VEC_address (mem_region_s, result),
1933 VEC_length (mem_region_s, result),
1934 sizeof (struct mem_region), mem_region_cmp);
1935
1936 /* Check that regions do not overlap. Simultaneously assign
1937 a numbering for the "mem" commands to use to refer to
1938 each region. */
1939 last_one = NULL;
1940 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1941 {
1942 this_one->number = ix;
1943
1944 if (last_one && last_one->hi > this_one->lo)
1945 {
1946 warning (_("Overlapping regions in memory map: ignoring"));
1947 VEC_free (mem_region_s, result);
1948 return NULL;
1949 }
1950 last_one = this_one;
1951 }
1952
1953 return result;
1954 }
1955
1956 void
1957 target_flash_erase (ULONGEST address, LONGEST length)
1958 {
1959 struct target_ops *t;
1960
1961 for (t = current_target.beneath; t != NULL; t = t->beneath)
1962 if (t->to_flash_erase != NULL)
1963 {
1964 if (targetdebug)
1965 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1966 hex_string (address), phex (length, 0));
1967 t->to_flash_erase (t, address, length);
1968 return;
1969 }
1970
1971 tcomplain ();
1972 }
1973
1974 void
1975 target_flash_done (void)
1976 {
1977 struct target_ops *t;
1978
1979 for (t = current_target.beneath; t != NULL; t = t->beneath)
1980 if (t->to_flash_done != NULL)
1981 {
1982 if (targetdebug)
1983 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1984 t->to_flash_done (t);
1985 return;
1986 }
1987
1988 tcomplain ();
1989 }
1990
/* "show trust-readonly-sections" command callback: report the
   current setting.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1999
2000 /* More generic transfers. */
2001
2002 static enum target_xfer_status
2003 default_xfer_partial (struct target_ops *ops, enum target_object object,
2004 const char *annex, gdb_byte *readbuf,
2005 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2006 ULONGEST *xfered_len)
2007 {
2008 if (object == TARGET_OBJECT_MEMORY
2009 && ops->deprecated_xfer_memory != NULL)
2010 /* If available, fall back to the target's
2011 "deprecated_xfer_memory" method. */
2012 {
2013 int xfered = -1;
2014
2015 errno = 0;
2016 if (writebuf != NULL)
2017 {
2018 void *buffer = xmalloc (len);
2019 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2020
2021 memcpy (buffer, writebuf, len);
2022 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2023 1/*write*/, NULL, ops);
2024 do_cleanups (cleanup);
2025 }
2026 if (readbuf != NULL)
2027 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2028 0/*read*/, NULL, ops);
2029 if (xfered > 0)
2030 {
2031 *xfered_len = (ULONGEST) xfered;
2032 return TARGET_XFER_E_IO;
2033 }
2034 else if (xfered == 0 && errno == 0)
2035 /* "deprecated_xfer_memory" uses 0, cross checked against
2036 ERRNO as one indication of an error. */
2037 return TARGET_XFER_EOF;
2038 else
2039 return TARGET_XFER_E_IO;
2040 }
2041 else
2042 {
2043 gdb_assert (ops->beneath != NULL);
2044 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2045 readbuf, writebuf, offset, len,
2046 xfered_len);
2047 }
2048 }
2049
/* Target vector read/write partial wrapper functions.  */

/* Read wrapper: forward to target_xfer_partial with a NULL write
   buffer.  */
static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2062
/* Write wrapper: forward to target_xfer_partial with a NULL read
   buffer.  */
static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
2072
2073 /* Wrappers to perform the full transfer. */
2074
2075 /* For docs on target_read see target.h. */
2076
2077 LONGEST
2078 target_read (struct target_ops *ops,
2079 enum target_object object,
2080 const char *annex, gdb_byte *buf,
2081 ULONGEST offset, LONGEST len)
2082 {
2083 LONGEST xfered = 0;
2084
2085 while (xfered < len)
2086 {
2087 ULONGEST xfered_len;
2088 enum target_xfer_status status;
2089
2090 status = target_read_partial (ops, object, annex,
2091 (gdb_byte *) buf + xfered,
2092 offset + xfered, len - xfered,
2093 &xfered_len);
2094
2095 /* Call an observer, notifying them of the xfer progress? */
2096 if (status == TARGET_XFER_EOF)
2097 return xfered;
2098 else if (status == TARGET_XFER_OK)
2099 {
2100 xfered += xfered_len;
2101 QUIT;
2102 }
2103 else
2104 return -1;
2105
2106 }
2107 return len;
2108 }
2109
2110 /* Assuming that the entire [begin, end) range of memory cannot be
2111 read, try to read whatever subrange is possible to read.
2112
2113 The function returns, in RESULT, either zero or one memory block.
2114 If there's a readable subrange at the beginning, it is completely
2115 read and returned. Any further readable subrange will not be read.
2116 Otherwise, if there's a readable subrange at the end, it will be
2117 completely read and returned. Any readable subranges before it
2118 (obviously, not starting at the beginning), will be ignored. In
2119 other cases -- either no readable subrange, or readable subrange(s)
2120 that is neither at the beginning, or end, nothing is returned.
2121
2122 The purpose of this function is to handle a read across a boundary
2123 of accessible memory in a case when memory map is not available.
2124 The above restrictions are fine for this case, but will give
2125 incorrect results if the memory is 'patchy'. However, supporting
2126 'patchy' memory would require trying to read every single byte,
2127 and it seems unacceptable solution. Explicit memory map is
2128 recommended for this case -- and target_read_memory_robust will
2129 take care of reading multiple ranges then. */
2130
static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either the first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of an accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* The readable subrange starts at BEGIN; bisect forward.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end-begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      /* The readable subrange ends at END; bisect backward.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; nothing to return.  */
      xfree (buf);
      return;
    }

  /* Loop invariant: the [current_begin, current_end) range was previously
     found to be not readable as a whole.

     Note the loop condition -- if the range has 1 byte, we can't divide
     the range so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the data already
         known to be readable.  */
      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only
             tries to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read into BUF, which
         is handed to the result entry (ownership transfers).  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read; copy it out of the
         tail of BUF into a right-sized buffer and free BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2244
2245 void
2246 free_memory_read_result_vector (void *x)
2247 {
2248 VEC(memory_read_result_s) *v = x;
2249 memory_read_result_s *current;
2250 int ix;
2251
2252 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2253 {
2254 xfree (current->data);
2255 }
2256 VEC_free (memory_read_result_s, v);
2257 }
2258
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  /* Accumulates one entry per successfully-read block; caller owns the
     vector and each entry's data buffer.  */
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* A region with hi == 0 extends to the end of the address space;
         cap the read at the request length.  */
      if (region->hi == 0)
        rlen = len - xfered;
      else
        rlen = region->hi - offset;
        /* NOTE(review): this looks like it should be
           region->hi - (offset + xfered); as written, RLEN can exceed
           the region's remaining size once XFERED > 0 -- confirm.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading the full chunk.  See if maybe we
                 can read some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              /* Successful (possibly short) read; BUFFER's ownership
                 moves into the result vector.  */
              struct memory_read_result r;
              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
2317
2318
2319 /* An alternative to target_write with progress callbacks. */
2320
2321 LONGEST
2322 target_write_with_progress (struct target_ops *ops,
2323 enum target_object object,
2324 const char *annex, const gdb_byte *buf,
2325 ULONGEST offset, LONGEST len,
2326 void (*progress) (ULONGEST, void *), void *baton)
2327 {
2328 LONGEST xfered = 0;
2329
2330 /* Give the progress callback a chance to set up. */
2331 if (progress)
2332 (*progress) (0, baton);
2333
2334 while (xfered < len)
2335 {
2336 ULONGEST xfered_len;
2337 enum target_xfer_status status;
2338
2339 status = target_write_partial (ops, object, annex,
2340 (gdb_byte *) buf + xfered,
2341 offset + xfered, len - xfered,
2342 &xfered_len);
2343
2344 if (status == TARGET_XFER_EOF)
2345 return xfered;
2346 if (TARGET_XFER_STATUS_ERROR_P (status))
2347 return -1;
2348
2349 gdb_assert (status == TARGET_XFER_OK);
2350 if (progress)
2351 (*progress) (xfered_len, baton);
2352
2353 xfered += xfered_len;
2354 QUIT;
2355 }
2356 return len;
2357 }
2358
2359 /* For docs on target_write see target.h. */
2360
2361 LONGEST
2362 target_write (struct target_ops *ops,
2363 enum target_object object,
2364 const char *annex, const gdb_byte *buf,
2365 ULONGEST offset, LONGEST len)
2366 {
2367 return target_write_with_progress (ops, object, annex, buf, offset, len,
2368 NULL, NULL);
2369 }
2370
2371 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2372 the size of the transferred data. PADDING additional bytes are
2373 available in *BUF_P. This is a helper function for
2374 target_read_alloc; see the declaration of that function for more
2375 information. */
2376
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes at the end of the buffer are kept free for the
         caller (e.g. a NUL terminator in target_read_stralloc).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  Hand the buffer to the caller only
             if something was actually transferred.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}
2433
2434 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2435 the size of the transferred data. See the declaration in "target.h"
2436 function for more information about the return value. */
2437
2438 LONGEST
2439 target_read_alloc (struct target_ops *ops, enum target_object object,
2440 const char *annex, gdb_byte **buf_p)
2441 {
2442 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2443 }
2444
2445 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2446 returned as a string, allocated using xmalloc. If an error occurs
2447 or the transfer is unsupported, NULL is returned. Empty objects
2448 are returned as allocated but empty strings. A warning is issued
2449 if the result contains any embedded NUL bytes. */
2450
2451 char *
2452 target_read_stralloc (struct target_ops *ops, enum target_object object,
2453 const char *annex)
2454 {
2455 gdb_byte *buffer;
2456 char *bufstr;
2457 LONGEST i, transferred;
2458
2459 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2460 bufstr = (char *) buffer;
2461
2462 if (transferred < 0)
2463 return NULL;
2464
2465 if (transferred == 0)
2466 return xstrdup ("");
2467
2468 bufstr[transferred] = 0;
2469
2470 /* Check for embedded NUL bytes; but allow trailing NULs. */
2471 for (i = strlen (bufstr); i < transferred; i++)
2472 if (bufstr[i] != 0)
2473 {
2474 warning (_("target object %d, annex %s, "
2475 "contained unexpected null characters"),
2476 (int) object, annex ? annex : "(none)");
2477 break;
2478 }
2479
2480 return bufstr;
2481 }
2482
2483 /* Memory transfer methods. */
2484
2485 void
2486 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2487 LONGEST len)
2488 {
2489 /* This method is used to read from an alternate, non-current
2490 target. This read must bypass the overlay support (as symbols
2491 don't match this target), and GDB's internal cache (wrong cache
2492 for this target). */
2493 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2494 != len)
2495 memory_error (TARGET_XFER_E_IO, addr);
2496 }
2497
2498 ULONGEST
2499 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2500 int len, enum bfd_endian byte_order)
2501 {
2502 gdb_byte buf[sizeof (ULONGEST)];
2503
2504 gdb_assert (len <= sizeof (buf));
2505 get_target_memory (ops, addr, buf, len);
2506 return extract_unsigned_integer (buf, len, byte_order);
2507 }
2508
2509 /* See target.h. */
2510
2511 int
2512 target_insert_breakpoint (struct gdbarch *gdbarch,
2513 struct bp_target_info *bp_tgt)
2514 {
2515 if (!may_insert_breakpoints)
2516 {
2517 warning (_("May not insert breakpoints"));
2518 return 1;
2519 }
2520
2521 return current_target.to_insert_breakpoint (&current_target,
2522 gdbarch, bp_tgt);
2523 }
2524
2525 /* See target.h. */
2526
2527 int
2528 target_remove_breakpoint (struct gdbarch *gdbarch,
2529 struct bp_target_info *bp_tgt)
2530 {
2531 /* This is kind of a weird case to handle, but the permission might
2532 have been changed after breakpoints were inserted - in which case
2533 we should just take the user literally and assume that any
2534 breakpoints should be left in place. */
2535 if (!may_insert_breakpoints)
2536 {
2537 warning (_("May not remove breakpoints"));
2538 return 1;
2539 }
2540
2541 return current_target.to_remove_breakpoint (&current_target,
2542 gdbarch, bp_tgt);
2543 }
2544
/* Implement the "info target"/"info files" style listing: print the
   symbol file, then each target on the stack that provides memory,
   top-most first.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
                       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Skip targets that provide no memory at all.  */
      if (!(*t->to_has_memory) (t))
        continue;

      /* Skip the dummy stratum and anything below it.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
        continue;
      /* If a target above already claimed all memory, note that the
         ones below are shadowed while it runs.  */
      if (has_all_mem)
        printf_unfiltered (_("\tWhile running this, "
                             "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2570
2571 /* This function is called before any new inferior is created, e.g.
2572 by running a program, attaching, or connecting to a target.
2573 It cleans up any state from previous invocations which might
2574 change between runs. This is a subset of what target_preopen
2575 resets (things which might change between targets). */
2576
2577 void
2578 target_pre_inferior (int from_tty)
2579 {
2580 /* Clear out solib state. Otherwise the solib state of the previous
2581 inferior might have survived and is entirely wrong for the new
2582 target. This has been observed on GNU/Linux using glibc 2.3. How
2583 to reproduce:
2584
2585 bash$ ./foo&
2586 [1] 4711
2587 bash$ ./foo&
2588 [1] 4712
2589 bash$ gdb ./foo
2590 [...]
2591 (gdb) attach 4711
2592 (gdb) detach
2593 (gdb) attach 4712
2594 Cannot access memory at address 0xdeadbeef
2595 */
2596
2597 /* In some OSs, the shared library list is the same/global/shared
2598 across inferiors. If code is shared between processes, so are
2599 memory regions and features. */
2600 if (!gdbarch_has_global_solist (target_gdbarch ()))
2601 {
2602 no_shared_libraries (NULL, from_tty);
2603
2604 invalidate_target_mem_regions ();
2605
2606 target_clear_description ();
2607 }
2608
2609 agent_capability_invalidate ();
2610 }
2611
2612 /* Callback for iterate_over_inferiors. Gets rid of the given
2613 inferior. */
2614
2615 static int
2616 dispose_inferior (struct inferior *inf, void *args)
2617 {
2618 struct thread_info *thread;
2619
2620 thread = any_thread_of_process (inf->pid);
2621 if (thread)
2622 {
2623 switch_to_thread (thread->ptid);
2624
2625 /* Core inferiors actually should be detached, not killed. */
2626 if (target_has_execution)
2627 target_kill ();
2628 else
2629 target_detach (NULL, 0);
2630 }
2631
2632 return 0;
2633 }
2634
2635 /* This is to be called by the open routine before it does
2636 anything. */
2637
2638 void
2639 target_preopen (int from_tty)
2640 {
2641 dont_repeat ();
2642
2643 if (have_inferiors ())
2644 {
2645 if (!from_tty
2646 || !have_live_inferiors ()
2647 || query (_("A program is being debugged already. Kill it? ")))
2648 iterate_over_inferiors (dispose_inferior, NULL);
2649 else
2650 error (_("Program not killed."));
2651 }
2652
2653 /* Calling target_kill may remove the target from the stack. But if
2654 it doesn't (which seems like a win for UDI), remove it now. */
2655 /* Leave the exec target, though. The user may be switching from a
2656 live process to a core of the same program. */
2657 pop_all_targets_above (file_stratum);
2658
2659 target_pre_inferior (from_tty);
2660 }
2661
2662 /* Detach a target after doing deferred register stores. */
2663
2664 void
2665 target_detach (const char *args, int from_tty)
2666 {
2667 struct target_ops* t;
2668
2669 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2670 /* Don't remove global breakpoints here. They're removed on
2671 disconnection from the target. */
2672 ;
2673 else
2674 /* If we're in breakpoints-always-inserted mode, have to remove
2675 them before detaching. */
2676 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2677
2678 prepare_for_detach ();
2679
2680 current_target.to_detach (&current_target, args, from_tty);
2681 if (targetdebug)
2682 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2683 args, from_tty);
2684 }
2685
2686 void
2687 target_disconnect (char *args, int from_tty)
2688 {
2689 struct target_ops *t;
2690
2691 /* If we're in breakpoints-always-inserted mode or if breakpoints
2692 are global across processes, we have to remove them before
2693 disconnecting. */
2694 remove_breakpoints ();
2695
2696 for (t = current_target.beneath; t != NULL; t = t->beneath)
2697 if (t->to_disconnect != NULL)
2698 {
2699 if (targetdebug)
2700 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2701 args, from_tty);
2702 t->to_disconnect (t, args, from_tty);
2703 return;
2704 }
2705
2706 tcomplain ();
2707 }
2708
2709 ptid_t
2710 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2711 {
2712 struct target_ops *t;
2713 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2714 status, options);
2715
2716 if (targetdebug)
2717 {
2718 char *status_string;
2719 char *options_string;
2720
2721 status_string = target_waitstatus_to_string (status);
2722 options_string = target_options_to_string (options);
2723 fprintf_unfiltered (gdb_stdlog,
2724 "target_wait (%d, status, options={%s})"
2725 " = %d, %s\n",
2726 ptid_get_pid (ptid), options_string,
2727 ptid_get_pid (retval), status_string);
2728 xfree (status_string);
2729 xfree (options_string);
2730 }
2731
2732 return retval;
2733 }
2734
2735 char *
2736 target_pid_to_str (ptid_t ptid)
2737 {
2738 struct target_ops *t;
2739
2740 for (t = current_target.beneath; t != NULL; t = t->beneath)
2741 {
2742 if (t->to_pid_to_str != NULL)
2743 return (*t->to_pid_to_str) (t, ptid);
2744 }
2745
2746 return normal_pid_to_str (ptid);
2747 }
2748
2749 char *
2750 target_thread_name (struct thread_info *info)
2751 {
2752 struct target_ops *t;
2753
2754 for (t = current_target.beneath; t != NULL; t = t->beneath)
2755 {
2756 if (t->to_thread_name != NULL)
2757 return (*t->to_thread_name) (t, info);
2758 }
2759
2760 return NULL;
2761 }
2762
2763 void
2764 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2765 {
2766 struct target_ops *t;
2767
2768 target_dcache_invalidate ();
2769
2770 current_target.to_resume (&current_target, ptid, step, signal);
2771 if (targetdebug)
2772 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2773 ptid_get_pid (ptid),
2774 step ? "step" : "continue",
2775 gdb_signal_to_name (signal));
2776
2777 registers_changed_ptid (ptid);
2778 set_executing (ptid, 1);
2779 set_running (ptid, 1);
2780 clear_inline_frame_state (ptid);
2781 }
2782
2783 void
2784 target_pass_signals (int numsigs, unsigned char *pass_signals)
2785 {
2786 struct target_ops *t;
2787
2788 for (t = current_target.beneath; t != NULL; t = t->beneath)
2789 {
2790 if (t->to_pass_signals != NULL)
2791 {
2792 if (targetdebug)
2793 {
2794 int i;
2795
2796 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2797 numsigs);
2798
2799 for (i = 0; i < numsigs; i++)
2800 if (pass_signals[i])
2801 fprintf_unfiltered (gdb_stdlog, " %s",
2802 gdb_signal_to_name (i));
2803
2804 fprintf_unfiltered (gdb_stdlog, " })\n");
2805 }
2806
2807 (*t->to_pass_signals) (t, numsigs, pass_signals);
2808 return;
2809 }
2810 }
2811 }
2812
2813 void
2814 target_program_signals (int numsigs, unsigned char *program_signals)
2815 {
2816 struct target_ops *t;
2817
2818 for (t = current_target.beneath; t != NULL; t = t->beneath)
2819 {
2820 if (t->to_program_signals != NULL)
2821 {
2822 if (targetdebug)
2823 {
2824 int i;
2825
2826 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2827 numsigs);
2828
2829 for (i = 0; i < numsigs; i++)
2830 if (program_signals[i])
2831 fprintf_unfiltered (gdb_stdlog, " %s",
2832 gdb_signal_to_name (i));
2833
2834 fprintf_unfiltered (gdb_stdlog, " })\n");
2835 }
2836
2837 (*t->to_program_signals) (t, numsigs, program_signals);
2838 return;
2839 }
2840 }
2841 }
2842
2843 /* Look through the list of possible targets for a target that can
2844 follow forks. */
2845
2846 int
2847 target_follow_fork (int follow_child, int detach_fork)
2848 {
2849 struct target_ops *t;
2850
2851 for (t = current_target.beneath; t != NULL; t = t->beneath)
2852 {
2853 if (t->to_follow_fork != NULL)
2854 {
2855 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2856
2857 if (targetdebug)
2858 fprintf_unfiltered (gdb_stdlog,
2859 "target_follow_fork (%d, %d) = %d\n",
2860 follow_child, detach_fork, retval);
2861 return retval;
2862 }
2863 }
2864
2865 /* Some target returned a fork event, but did not know how to follow it. */
2866 internal_error (__FILE__, __LINE__,
2867 _("could not find a target to follow fork"));
2868 }
2869
2870 void
2871 target_mourn_inferior (void)
2872 {
2873 struct target_ops *t;
2874
2875 for (t = current_target.beneath; t != NULL; t = t->beneath)
2876 {
2877 if (t->to_mourn_inferior != NULL)
2878 {
2879 t->to_mourn_inferior (t);
2880 if (targetdebug)
2881 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2882
2883 /* We no longer need to keep handles on any of the object files.
2884 Make sure to release them to avoid unnecessarily locking any
2885 of them while we're not actually debugging. */
2886 bfd_cache_close_all ();
2887
2888 return;
2889 }
2890 }
2891
2892 internal_error (__FILE__, __LINE__,
2893 _("could not find a target to follow mourn inferior"));
2894 }
2895
2896 /* Look for a target which can describe architectural features, starting
2897 from TARGET. If we find one, return its description. */
2898
2899 const struct target_desc *
2900 target_read_description (struct target_ops *target)
2901 {
2902 struct target_ops *t;
2903
2904 for (t = target; t != NULL; t = t->beneath)
2905 if (t->to_read_description != NULL)
2906 {
2907 const struct target_desc *tdesc;
2908
2909 tdesc = t->to_read_description (t);
2910 if (tdesc)
2911 return tdesc;
2912 }
2913
2914 return NULL;
2915 }
2916
2917 /* The default implementation of to_search_memory.
2918 This implements a basic search of memory, reading target memory and
2919 performing the search here (as opposed to performing the search in on the
2920 target side with, for example, gdbserver). */
2921
int
simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
                 "memory at %s, halting search."),
               pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
        {
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);
          return 1;
        }

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
        {
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
          int nr_to_read;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
            {
              warning (_("Unable to access %s bytes of target "
                         "memory at %s, halting search."),
                       plongest (nr_to_read),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);
              return -1;
            }

          start_addr += chunk_size;
        }
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3024
3025 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3026 sequence of bytes in PATTERN with length PATTERN_LEN.
3027
3028 The result is 1 if found, 0 if not found, and -1 if there was an error
3029 requiring halting of the search (e.g. memory read error).
3030 If the pattern is found the address is recorded in FOUND_ADDRP. */
3031
3032 int
3033 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3034 const gdb_byte *pattern, ULONGEST pattern_len,
3035 CORE_ADDR *found_addrp)
3036 {
3037 struct target_ops *t;
3038 int found;
3039
3040 /* We don't use INHERIT to set current_target.to_search_memory,
3041 so we have to scan the target stack and handle targetdebug
3042 ourselves. */
3043
3044 if (targetdebug)
3045 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3046 hex_string (start_addr));
3047
3048 for (t = current_target.beneath; t != NULL; t = t->beneath)
3049 if (t->to_search_memory != NULL)
3050 break;
3051
3052 if (t != NULL)
3053 {
3054 found = t->to_search_memory (t, start_addr, search_space_len,
3055 pattern, pattern_len, found_addrp);
3056 }
3057 else
3058 {
3059 /* If a special version of to_search_memory isn't available, use the
3060 simple version. */
3061 found = simple_search_memory (current_target.beneath,
3062 start_addr, search_space_len,
3063 pattern, pattern_len, found_addrp);
3064 }
3065
3066 if (targetdebug)
3067 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3068
3069 return found;
3070 }
3071
3072 /* Look through the currently pushed targets. If none of them will
3073 be able to restart the currently running process, issue an error
3074 message. */
3075
3076 void
3077 target_require_runnable (void)
3078 {
3079 struct target_ops *t;
3080
3081 for (t = target_stack; t != NULL; t = t->beneath)
3082 {
3083 /* If this target knows how to create a new program, then
3084 assume we will still be able to after killing the current
3085 one. Either killing and mourning will not pop T, or else
3086 find_default_run_target will find it again. */
3087 if (t->to_create_inferior != NULL)
3088 return;
3089
3090 /* Do not worry about thread_stratum targets that can not
3091 create inferiors. Assume they will be pushed again if
3092 necessary, and continue to the process_stratum. */
3093 if (t->to_stratum == thread_stratum
3094 || t->to_stratum == arch_stratum)
3095 continue;
3096
3097 error (_("The \"%s\" target does not support \"run\". "
3098 "Try \"help target\" or \"continue\"."),
3099 t->to_shortname);
3100 }
3101
3102 /* This function is only called if the target is running. In that
3103 case there should have been a process_stratum target and it
3104 should either know how to create inferiors, or not... */
3105 internal_error (__FILE__, __LINE__, _("No targets found"));
3106 }
3107
3108 /* Look through the list of possible targets for a target that can
3109 execute a run or attach command without any other data. This is
3110 used to locate the default process stratum.
3111
3112 If DO_MESG is not NULL, the result is always valid (error() is
3113 called for errors); else, return NULL on error. */
3114
3115 static struct target_ops *
3116 find_default_run_target (char *do_mesg)
3117 {
3118 struct target_ops **t;
3119 struct target_ops *runable = NULL;
3120 int count;
3121
3122 count = 0;
3123
3124 for (t = target_structs; t < target_structs + target_struct_size;
3125 ++t)
3126 {
3127 if ((*t)->to_can_run && target_can_run (*t))
3128 {
3129 runable = *t;
3130 ++count;
3131 }
3132 }
3133
3134 if (count != 1)
3135 {
3136 if (do_mesg)
3137 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3138 else
3139 return NULL;
3140 }
3141
3142 return runable;
3143 }
3144
3145 void
3146 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3147 {
3148 struct target_ops *t;
3149
3150 t = find_default_run_target ("attach");
3151 (t->to_attach) (t, args, from_tty);
3152 return;
3153 }
3154
3155 void
3156 find_default_create_inferior (struct target_ops *ops,
3157 char *exec_file, char *allargs, char **env,
3158 int from_tty)
3159 {
3160 struct target_ops *t;
3161
3162 t = find_default_run_target ("run");
3163 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3164 return;
3165 }
3166
/* The dummy target's to_can_async_p method: ask the default run
   target (if any) whether it can operate asynchronously.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Only call the method if the target provides a real implementation
     rather than the installed delegator default.  */
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}
3181
/* The dummy target's to_is_async_p method: ask the default run
   target (if any) whether it is currently asynchronous.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Only call the method if the target provides a real implementation
     rather than the installed delegator default.  */
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
3196
/* The dummy target's to_supports_non_stop method: defer to the
   default run target, or report no support when there is none.  */

static int
find_default_supports_non_stop (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) (t);
  return 0;
}
3207
/* Return nonzero if some target on the current stack supports
   non-stop mode; the first target implementing the method decides.  */

int
target_supports_non_stop (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_non_stop)
      return t->to_supports_non_stop (t);

  return 0;
}
3219
3220 /* Implement the "info proc" command. */
3221
3222 int
3223 target_info_proc (char *args, enum info_proc_what what)
3224 {
3225 struct target_ops *t;
3226
3227 /* If we're already connected to something that can get us OS
3228 related data, use it. Otherwise, try using the native
3229 target. */
3230 if (current_target.to_stratum >= process_stratum)
3231 t = current_target.beneath;
3232 else
3233 t = find_default_run_target (NULL);
3234
3235 for (; t != NULL; t = t->beneath)
3236 {
3237 if (t->to_info_proc != NULL)
3238 {
3239 t->to_info_proc (t, args, what);
3240
3241 if (targetdebug)
3242 fprintf_unfiltered (gdb_stdlog,
3243 "target_info_proc (\"%s\", %d)\n", args, what);
3244
3245 return 1;
3246 }
3247 }
3248
3249 return 0;
3250 }
3251
/* The dummy target's to_supports_disable_randomization method:
   defer to the default run target, or report no support.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}
3262
/* Return nonzero if some target on the current stack supports
   disabling address space randomization.  */

int
target_supports_disable_randomization (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_disable_randomization)
      return t->to_supports_disable_randomization (t);

  return 0;
}
3274
/* Read OS data of kind TYPE from the target as a string, via the
   TARGET_OBJECT_OSDATA transfer object.  Returns NULL if no suitable
   target is available; otherwise the string returned by
   target_read_stralloc (caller owns it — presumably xfree'd by the
   caller; confirm against target_read_stralloc's contract).  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
3293
/* Determine the current address space of thread PTID.  Asks the first
   target on the stack implementing to_thread_address_space; otherwise
   falls back to the owning inferior's main address space.  Never
   returns NULL (internal_error otherwise).  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* A target that implements the method must return a valid
	     address space.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3330
3331
3332 /* Target file operations. */
3333
/* Return the target to start a file-I/O delegation walk from: the
   target beneath the current process target if one is connected,
   otherwise the native default run target (may be NULL).  */

static struct target_ops *
default_fileio_target (void)
{
  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  if (current_target.to_stratum >= process_stratum)
    return current_target.beneath;
  else
    return find_default_run_target ("file I/O");
}
3344
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  The first target in the stack implementing
   to_fileio_open handles the request; if none does, *TARGET_ERRNO is
   set to FILEIO_ENOSYS.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3372
/* Write up to LEN bytes from WRITE_BUF to FD on the target at offset
   OFFSET.  Return the number of bytes written, or -1 if an error
   occurs (and set *TARGET_ERRNO).  Delegates to the first target
   implementing to_fileio_pwrite; FILEIO_ENOSYS if none does.  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3402
/* Read up to LEN bytes from FD on the target at offset OFFSET into
   READ_BUF.  Return the number of bytes read, or -1 if an error
   occurs (and set *TARGET_ERRNO).  Delegates to the first target
   implementing to_fileio_pread; FILEIO_ENOSYS if none does.  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3432
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target
   implementing to_fileio_close; FILEIO_ENOSYS if none does.  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3457
/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  Delegates to the first target
   implementing to_fileio_unlink; FILEIO_ENOSYS if none does.  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3482
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc (caller frees), or
   NULL if an error occurs (and set *TARGET_ERRNO).  Delegates to the
   first target implementing to_fileio_readlink; FILEIO_ENOSYS if
   none does.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3509
/* Cleanup callback: close the target file descriptor that OPAQUE
   points at, discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int *fdp = (int *) opaque;
  int target_errno;

  target_fileio_close (*fdp, &target_errno);
}
3518
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   Returns -1 on open/read failure.  NOTE: when the file is empty the
   return value is 0 and *BUF_P is left untouched (the buffer is freed
   here); callers must not use *BUF_P in that case.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end so the caller can append,
	 e.g., a terminating NUL.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3582
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  Thin wrapper
   requesting no padding bytes.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3592
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Ask for one padding byte so there is room for the NUL below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3629
3630
/* Default to_region_ok_for_hw_watchpoint: accept regions no wider
   than one pointer on the target architecture.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3637
/* Default to_watchpoint_addr_within_range: true iff ADDR lies in the
   half-open range [START, START + LENGTH).  */

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
3645
/* Default to_thread_architecture: every thread shares the main
   target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3651
/* Trivial stub returning 0; cast onto various target_ops slots
   (see init_dummy_target).  */

static int
return_zero (void)
{
  int result = 0;

  return result;
}
3657
/* Trivial stub returning 1, for target_ops slots that should
   unconditionally report success/true.  */

static int
return_one (void)
{
  int result = 1;

  return result;
}
3663
/* Trivial stub returning -1, for target_ops slots whose "not
   supported" convention is a negative result.  */

static int
return_minus_one (void)
{
  int result = -1;

  return result;
}
3669
/* Trivial stub returning a null pointer.  */

static void *
return_null (void)
{
  return NULL;
}
3675
/* Find the next target down the stack from the specified target T;
   NULL when T is the bottom of the stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3685
/* See target.h.  Return the first target on the current stack at
   STRATUM, or NULL if none is pushed there.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stratum == stratum)
      return t;

  return NULL;
}
3699
3700 \f
/* The inferior process has died.  Long live the inferior!  Shared
   teardown used by targets' to_mourn_inferior implementations: clears
   inferior_ptid, discards breakpoints and threads, and resets
   per-inferior caches.  The statement ordering below is deliberate.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3735 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer — the result is overwritten by the next call and must
   not be freed or cached by the caller.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3747
/* The dummy target's to_pid_to_str method: plain "process N"
   formatting.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3753
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error throws.  */
  return 0;
}
3762
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error throws.  */
  return NULL;
}
3771
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  /* Not reached; tcomplain does not return.  */
  return NULL;
}
3779
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3786
/* Set up the handful of non-empty slots needed by the dummy target
   vector; everything else is filled in by install_dummy_methods.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The casts below adapt the generic return_zero stub to the exact
     slot signatures; the stubs ignore their arguments.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3816 \f
3817 static void
3818 debug_to_open (char *args, int from_tty)
3819 {
3820 debug_target.to_open (args, from_tty);
3821
3822 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3823 }
3824
/* Close TARG, preferring its to_xclose method (which also frees the
   target_ops itself) over plain to_close.  TARG must already have
   been unpushed from the target stack.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3838
/* Attach to a process described by ARGS, dispatching through the
   current target's to_attach method.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3847
/* Return nonzero if thread PTID is still alive, asking the first
   target in the stack that implements to_thread_alive; 0 if no
   target does.  */

int
target_thread_alive (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_alive != NULL)
	{
	  int retval;

	  retval = t->to_thread_alive (t, ptid);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
				ptid_get_pid (ptid), retval);

	  return retval;
	}
    }

  return 0;
}
3870
/* Ask the first target implementing to_find_new_threads to update
   GDB's thread list; silently does nothing if no target does.  */

void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
	{
	  t->to_find_new_threads (t);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

	  return;
	}
    }
}
3888
/* Request that thread(s) matching PTID stop, unless the user has
   disallowed interrupting the target ("set may-stop off").  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3900
/* Debug wrapper for to_post_attach: forward, then log.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3908
3909 /* Concatenate ELEM to LIST, a comma separate list, and return the
3910 result. The LIST incoming argument is released. */
3911
3912 static char *
3913 str_comma_list_concat_elem (char *list, const char *elem)
3914 {
3915 if (list == NULL)
3916 return xstrdup (elem);
3917 else
3918 return reconcat (list, list, ", ", elem, (char *) NULL);
3919 }
3920
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int opt_is_set = (*target_options & opt) != 0;

  if (opt_is_set)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3938
/* Render TARGET_OPTIONS (a TARGET_* flag mask) as a human-readable,
   comma-separated, xmalloc'd string; any bits not recognized below
   are reported as "unknown???".  Caller frees the result.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Each DO_TARG_OPTION consumes its flag from target_options.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3956
/* Log a register access to gdb_stdlog on behalf of FUNC: the register
   name (or number), its raw bytes, and — for values that fit in a
   LONGEST — its hex and decimal rendering.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3993
/* Fetch register REGNO (or all registers if REGNO is -1 — TODO
   confirm against target.h) into REGCACHE, via the first target
   implementing to_fetch_registers.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_fetch_registers != NULL)
	{
	  t->to_fetch_registers (t, regcache, regno);
	  if (targetdebug)
	    debug_print_register ("target_fetch_registers", regcache, regno);
	  return;
	}
    }
}
4010
/* Write register REGNO from REGCACHE back to the target, honoring
   the "set may-write-registers" user setting.  */

void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
4025
/* Return the CPU core thread PTID last ran on, or -1 if no target
   can tell.  */

int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
	{
	  int retval = t->to_core_of_thread (t, ptid);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_core_of_thread (%d) = %d\n",
				ptid_get_pid (ptid), retval);
	  return retval;
	}
    }

  return -1;
}
4047
/* Compare SIZE bytes of target memory at MEMADDR against DATA via
   the first target implementing to_verify_memory; throws (tcomplain,
   which does not return) if no target supports it.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  /* tcomplain is ATTRIBUTE_NORETURN, so no return value is needed here.  */
  tcomplain ();
}
4071
/* The documentation for this function is in its prototype declaration in
   target.h.  Note the fall-through result 1 signals failure
   (watchpoint insertion routines return 0 on success).  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4098
/* The documentation for this function is in its prototype declaration in
   target.h.  Note the fall-through result 1 signals failure
   (watchpoint removal routines return 0 on success).  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4125
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 if no target implements the method.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}
4140
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 if no target implements the method.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
4155
/* See target.h.  Enable branch tracing for PTID on the first target
   supporting it; throws (tcomplain) if none does.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  return NULL;
}
4170
/* See target.h.  Disable branch tracing for BTINFO; throws
   (tcomplain) if no target supports branch tracing.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
4187
/* See target.h.  Tear down branch tracing state BTINFO without
   touching the (possibly gone) target process; throws (tcomplain)
   if no target supports branch tracing.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
4204
/* See target.h.  Read branch trace data for BTINFO into BTRACE;
   throws (tcomplain) if no target supports branch tracing.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4221
/* See target.h.  Stop any in-progress recording; silently does
   nothing if no target supports recording.  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording (t);
	return;
      }

  /* This is optional.  */
}
4238
/* See target.h.  Print information about the recording; throws
   (tcomplain) if no target supports recording.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}
4255
/* See target.h.  Save the recorded execution trace to FILENAME;
   throws (tcomplain) if no target supports it.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (t, filename);
	return;
      }

  tcomplain ();
}
4272
/* See target.h.  Return nonzero if some target implements
   to_delete_record (the method is only probed, not called).  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}
4286
/* See target.h.  Delete the recorded execution trace past the
   current position; throws (tcomplain) if unsupported.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record (t);
	return;
      }

  tcomplain ();
}
4303
/* See target.h.  Return nonzero if the recording target is currently
   replaying rather than recording; 0 if unsupported.  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying (t);

  return 0;
}
4317
/* See target.h.  Go to the beginning of the recorded trace; throws
   (tcomplain) if unsupported.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin (t);
	return;
      }

  tcomplain ();
}
4334
/* See target.h.  Go to the end of the recorded trace; throws
   (tcomplain) if unsupported.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end (t);
	return;
      }

  tcomplain ();
}
4351
/* See target.h.  Go to instruction number INSN in the recorded
   trace; throws (tcomplain) if unsupported.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (t, insn);
	return;
      }

  tcomplain ();
}
4368
/* See target.h.  Print SIZE instructions of recorded history; throws
   (tcomplain) if unsupported.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (t, size, flags);
	return;
      }

  tcomplain ();
}
4385
/* See target.h.  Print SIZE instructions of recorded history starting
   at FROM; throws (tcomplain) if unsupported.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (t, from, size, flags);
	return;
      }

  tcomplain ();
}
4402
/* See target.h.  Print recorded instruction history between BEGIN and
   END; throws (tcomplain) if unsupported.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4419
/* See target.h.  Print SIZE functions of recorded call history; throws
   (tcomplain) if unsupported.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (t, size, flags);
	return;
      }

  tcomplain ();
}
4436
/* See target.h.  Print SIZE functions of recorded call history starting
   at BEGIN; throws (tcomplain) if unsupported.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (t, begin, size, flags);
	return;
      }

  tcomplain ();
}
4453
/* See target.h.  Print recorded call history between BEGIN and END;
   throws (tcomplain) if unsupported.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4470
4471 static void
4472 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4473 {
4474 debug_target.to_prepare_to_store (&debug_target, regcache);
4475
4476 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4477 }
4478
4479 /* See target.h. */
4480
4481 const struct frame_unwind *
4482 target_get_unwinder (void)
4483 {
4484 struct target_ops *t;
4485
4486 for (t = current_target.beneath; t != NULL; t = t->beneath)
4487 if (t->to_get_unwinder != NULL)
4488 return t->to_get_unwinder;
4489
4490 return NULL;
4491 }
4492
4493 /* See target.h. */
4494
4495 const struct frame_unwind *
4496 target_get_tailcall_unwinder (void)
4497 {
4498 struct target_ops *t;
4499
4500 for (t = current_target.beneath; t != NULL; t = t->beneath)
4501 if (t->to_get_tailcall_unwinder != NULL)
4502 return t->to_get_tailcall_unwinder;
4503
4504 return NULL;
4505 }
4506
4507 /* See target.h. */
4508
4509 CORE_ADDR
4510 forward_target_decr_pc_after_break (struct target_ops *ops,
4511 struct gdbarch *gdbarch)
4512 {
4513 for (; ops != NULL; ops = ops->beneath)
4514 if (ops->to_decr_pc_after_break != NULL)
4515 return ops->to_decr_pc_after_break (ops, gdbarch);
4516
4517 return gdbarch_decr_pc_after_break (gdbarch);
4518 }
4519
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Start the query at the bottom-most layer of the target stack.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4527
/* Logging shim for the deprecated_xfer_memory target method: forward
   the transfer to the saved target vector and dump the request (and,
   on success, the transferred bytes) to gdb_stdlog.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* No trailing newline yet: the byte dump (if any) continues this
     line; the final '\n' is emitted at the end.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump onto a new line at every 16-byte-aligned
	     host address of the buffer.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, elide everything after the first
		 line of the dump.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4568
4569 static void
4570 debug_to_files_info (struct target_ops *target)
4571 {
4572 debug_target.to_files_info (target);
4573
4574 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4575 }
4576
4577 static int
4578 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4579 struct bp_target_info *bp_tgt)
4580 {
4581 int retval;
4582
4583 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4584
4585 fprintf_unfiltered (gdb_stdlog,
4586 "target_insert_breakpoint (%s, xxx) = %ld\n",
4587 core_addr_to_string (bp_tgt->placed_address),
4588 (unsigned long) retval);
4589 return retval;
4590 }
4591
4592 static int
4593 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4594 struct bp_target_info *bp_tgt)
4595 {
4596 int retval;
4597
4598 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4599
4600 fprintf_unfiltered (gdb_stdlog,
4601 "target_remove_breakpoint (%s, xxx) = %ld\n",
4602 core_addr_to_string (bp_tgt->placed_address),
4603 (unsigned long) retval);
4604 return retval;
4605 }
4606
4607 static int
4608 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4609 int type, int cnt, int from_tty)
4610 {
4611 int retval;
4612
4613 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4614 type, cnt, from_tty);
4615
4616 fprintf_unfiltered (gdb_stdlog,
4617 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4618 (unsigned long) type,
4619 (unsigned long) cnt,
4620 (unsigned long) from_tty,
4621 (unsigned long) retval);
4622 return retval;
4623 }
4624
4625 static int
4626 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4627 CORE_ADDR addr, int len)
4628 {
4629 CORE_ADDR retval;
4630
4631 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4632 addr, len);
4633
4634 fprintf_unfiltered (gdb_stdlog,
4635 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4636 core_addr_to_string (addr), (unsigned long) len,
4637 core_addr_to_string (retval));
4638 return retval;
4639 }
4640
4641 static int
4642 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4643 CORE_ADDR addr, int len, int rw,
4644 struct expression *cond)
4645 {
4646 int retval;
4647
4648 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4649 addr, len,
4650 rw, cond);
4651
4652 fprintf_unfiltered (gdb_stdlog,
4653 "target_can_accel_watchpoint_condition "
4654 "(%s, %d, %d, %s) = %ld\n",
4655 core_addr_to_string (addr), len, rw,
4656 host_address_to_string (cond), (unsigned long) retval);
4657 return retval;
4658 }
4659
4660 static int
4661 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4662 {
4663 int retval;
4664
4665 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4666
4667 fprintf_unfiltered (gdb_stdlog,
4668 "target_stopped_by_watchpoint () = %ld\n",
4669 (unsigned long) retval);
4670 return retval;
4671 }
4672
4673 static int
4674 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4675 {
4676 int retval;
4677
4678 retval = debug_target.to_stopped_data_address (target, addr);
4679
4680 fprintf_unfiltered (gdb_stdlog,
4681 "target_stopped_data_address ([%s]) = %ld\n",
4682 core_addr_to_string (*addr),
4683 (unsigned long)retval);
4684 return retval;
4685 }
4686
4687 static int
4688 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4689 CORE_ADDR addr,
4690 CORE_ADDR start, int length)
4691 {
4692 int retval;
4693
4694 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4695 start, length);
4696
4697 fprintf_filtered (gdb_stdlog,
4698 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4699 core_addr_to_string (addr), core_addr_to_string (start),
4700 length, retval);
4701 return retval;
4702 }
4703
4704 static int
4705 debug_to_insert_hw_breakpoint (struct target_ops *self,
4706 struct gdbarch *gdbarch,
4707 struct bp_target_info *bp_tgt)
4708 {
4709 int retval;
4710
4711 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4712 gdbarch, bp_tgt);
4713
4714 fprintf_unfiltered (gdb_stdlog,
4715 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4716 core_addr_to_string (bp_tgt->placed_address),
4717 (unsigned long) retval);
4718 return retval;
4719 }
4720
4721 static int
4722 debug_to_remove_hw_breakpoint (struct target_ops *self,
4723 struct gdbarch *gdbarch,
4724 struct bp_target_info *bp_tgt)
4725 {
4726 int retval;
4727
4728 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4729 gdbarch, bp_tgt);
4730
4731 fprintf_unfiltered (gdb_stdlog,
4732 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4733 core_addr_to_string (bp_tgt->placed_address),
4734 (unsigned long) retval);
4735 return retval;
4736 }
4737
4738 static int
4739 debug_to_insert_watchpoint (struct target_ops *self,
4740 CORE_ADDR addr, int len, int type,
4741 struct expression *cond)
4742 {
4743 int retval;
4744
4745 retval = debug_target.to_insert_watchpoint (&debug_target,
4746 addr, len, type, cond);
4747
4748 fprintf_unfiltered (gdb_stdlog,
4749 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4750 core_addr_to_string (addr), len, type,
4751 host_address_to_string (cond), (unsigned long) retval);
4752 return retval;
4753 }
4754
4755 static int
4756 debug_to_remove_watchpoint (struct target_ops *self,
4757 CORE_ADDR addr, int len, int type,
4758 struct expression *cond)
4759 {
4760 int retval;
4761
4762 retval = debug_target.to_remove_watchpoint (&debug_target,
4763 addr, len, type, cond);
4764
4765 fprintf_unfiltered (gdb_stdlog,
4766 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4767 core_addr_to_string (addr), len, type,
4768 host_address_to_string (cond), (unsigned long) retval);
4769 return retval;
4770 }
4771
4772 static void
4773 debug_to_terminal_init (struct target_ops *self)
4774 {
4775 debug_target.to_terminal_init (&debug_target);
4776
4777 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4778 }
4779
4780 static void
4781 debug_to_terminal_inferior (struct target_ops *self)
4782 {
4783 debug_target.to_terminal_inferior (&debug_target);
4784
4785 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4786 }
4787
4788 static void
4789 debug_to_terminal_ours_for_output (struct target_ops *self)
4790 {
4791 debug_target.to_terminal_ours_for_output (&debug_target);
4792
4793 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4794 }
4795
4796 static void
4797 debug_to_terminal_ours (struct target_ops *self)
4798 {
4799 debug_target.to_terminal_ours (&debug_target);
4800
4801 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4802 }
4803
4804 static void
4805 debug_to_terminal_save_ours (struct target_ops *self)
4806 {
4807 debug_target.to_terminal_save_ours (&debug_target);
4808
4809 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4810 }
4811
4812 static void
4813 debug_to_terminal_info (struct target_ops *self,
4814 const char *arg, int from_tty)
4815 {
4816 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4817
4818 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4819 from_tty);
4820 }
4821
4822 static void
4823 debug_to_load (struct target_ops *self, char *args, int from_tty)
4824 {
4825 debug_target.to_load (&debug_target, args, from_tty);
4826
4827 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4828 }
4829
4830 static void
4831 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4832 {
4833 debug_target.to_post_startup_inferior (&debug_target, ptid);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4836 ptid_get_pid (ptid));
4837 }
4838
4839 static int
4840 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4847 pid, retval);
4848
4849 return retval;
4850 }
4851
4852 static int
4853 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4854 {
4855 int retval;
4856
4857 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4860 pid, retval);
4861
4862 return retval;
4863 }
4864
4865 static int
4866 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4867 {
4868 int retval;
4869
4870 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4871
4872 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4873 pid, retval);
4874
4875 return retval;
4876 }
4877
4878 static int
4879 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4880 {
4881 int retval;
4882
4883 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4884
4885 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4886 pid, retval);
4887
4888 return retval;
4889 }
4890
4891 static int
4892 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4893 {
4894 int retval;
4895
4896 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4897
4898 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4899 pid, retval);
4900
4901 return retval;
4902 }
4903
4904 static int
4905 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4906 {
4907 int retval;
4908
4909 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4910
4911 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4912 pid, retval);
4913
4914 return retval;
4915 }
4916
4917 static int
4918 debug_to_has_exited (struct target_ops *self,
4919 int pid, int wait_status, int *exit_status)
4920 {
4921 int has_exited;
4922
4923 has_exited = debug_target.to_has_exited (&debug_target,
4924 pid, wait_status, exit_status);
4925
4926 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4927 pid, wait_status, *exit_status, has_exited);
4928
4929 return has_exited;
4930 }
4931
4932 static int
4933 debug_to_can_run (struct target_ops *self)
4934 {
4935 int retval;
4936
4937 retval = debug_target.to_can_run (&debug_target);
4938
4939 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4940
4941 return retval;
4942 }
4943
4944 static struct gdbarch *
4945 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4946 {
4947 struct gdbarch *retval;
4948
4949 retval = debug_target.to_thread_architecture (ops, ptid);
4950
4951 fprintf_unfiltered (gdb_stdlog,
4952 "target_thread_architecture (%s) = %s [%s]\n",
4953 target_pid_to_str (ptid),
4954 host_address_to_string (retval),
4955 gdbarch_bfd_arch_info (retval)->printable_name);
4956 return retval;
4957 }
4958
4959 static void
4960 debug_to_stop (struct target_ops *self, ptid_t ptid)
4961 {
4962 debug_target.to_stop (&debug_target, ptid);
4963
4964 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4965 target_pid_to_str (ptid));
4966 }
4967
4968 static void
4969 debug_to_rcmd (struct target_ops *self, char *command,
4970 struct ui_file *outbuf)
4971 {
4972 debug_target.to_rcmd (&debug_target, command, outbuf);
4973 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4974 }
4975
4976 static char *
4977 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4978 {
4979 char *exec_file;
4980
4981 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4982
4983 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4984 pid, exec_file);
4985
4986 return exec_file;
4987 }
4988
/* Install the debug_to_* logging wrappers: save the current target
   vector in DEBUG_TARGET, then overwrite the methods of
   CURRENT_TARGET with wrappers that forward to the saved copy and
   trace each call to gdb_stdlog.  Called when "set debug target" is
   enabled.  */

static void
setup_target_debug (void)
{
  /* Keep the real vector around so the wrappers can delegate to it.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5036 \f
5037
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5042
/* Default implementation of the to_rcmd target method: this target
   has no remote monitor, so just error out.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
5048
5049 static void
5050 do_monitor_command (char *cmd,
5051 int from_tty)
5052 {
5053 target_rcmd (cmd, gdb_stdtarg);
5054 }
5055
5056 /* Print the name of each layers of our target stack. */
5057
5058 static void
5059 maintenance_print_target_stack (char *cmd, int from_tty)
5060 {
5061 struct target_ops *t;
5062
5063 printf_filtered (_("The current target stack is:\n"));
5064
5065 for (t = target_stack; t != NULL; t = t->beneath)
5066 {
5067 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5068 }
5069 }
5070
/* Controls if async mode is permitted.  This is the value actually
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated from it until
   it is safe to do so (see set_target_async_command).  */
static int target_async_permitted_1 = 0;
5077
5078 static void
5079 set_target_async_command (char *args, int from_tty,
5080 struct cmd_list_element *c)
5081 {
5082 if (have_live_inferiors ())
5083 {
5084 target_async_permitted_1 = target_async_permitted;
5085 error (_("Cannot change this setting while the inferior is running."));
5086 }
5087
5088 target_async_permitted = target_async_permitted_1;
5089 }
5090
/* The "show target-async" command callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5100
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these; the real may_* globals are updated from them only
   when it is safe (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5109
/* Make the user-set values match the real values again.  Used to
   roll back the command-visible copies when a "set may-*" change is
   rejected.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5122
/* The one function handles (most of) the permission flags in the same
   way.  Note that may_write_memory is intentionally NOT handled here:
   it is settable independently of observer mode via
   set_write_memory_permission below.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Undo the user's edit of the *_1 copy before erroring out.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5144
5145 /* Set memory write permission independently of observer mode. */
5146
5147 static void
5148 set_write_memory_permission (char *args, int from_tty,
5149 struct cmd_list_element *c)
5150 {
5151 /* Make the real values match the user-changed values. */
5152 may_write_memory = may_write_memory_1;
5153 update_observer_mode ();
5154 }
5155
5156
/* Module initialization: push the dummy target as the bottom of the
   target stack and register all target-related commands and
   set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of every target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same info.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  /* "set/show debug target" — controls the debug_to_* wrappers.  */
  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission commands below all write into *_1 copies
     and are propagated by set_target_permissions (or, for memory
     writes, set_write_memory_permission).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.140363 seconds and 5 git commands to generate.