Add target_ops argument to to_region_ok_for_hw_watchpoint
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void tcomplain (void) ATTRIBUTE_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
132 struct expression *);
133
134 static void debug_to_terminal_init (void);
135
136 static void debug_to_terminal_inferior (void);
137
138 static void debug_to_terminal_ours_for_output (void);
139
140 static void debug_to_terminal_save_ours (void);
141
142 static void debug_to_terminal_ours (void);
143
144 static void debug_to_load (char *, int);
145
146 static int debug_to_can_run (void);
147
148 static void debug_to_stop (ptid_t);
149
150 /* Pointer to array of target architecture structures; the size of the
151 array; the current index into the array; the allocated size of the
152 array. */
153 struct target_ops **target_structs;
154 unsigned target_struct_size;
155 unsigned target_struct_allocsize;
156 #define DEFAULT_ALLOCSIZE 10
157
158 /* The initial current target, so that there is always a semi-valid
159 current target. */
160
161 static struct target_ops dummy_target;
162
163 /* Top of target stack. */
164
165 static struct target_ops *target_stack;
166
167 /* The target structure we are currently using to talk to a process
168 or file or whatever "inferior" we have. */
169
170 struct target_ops current_target;
171
172 /* Command list for target. */
173
174 static struct cmd_list_element *targetlist = NULL;
175
176 /* Nonzero if we should trust readonly sections from the
177 executable when reading memory. */
178
179 static int trust_readonly = 0;
180
181 /* Nonzero if we should show true memory content including
182 memory breakpoint inserted by gdb. */
183
184 static int show_memory_breakpoints = 0;
185
186 /* These globals control whether GDB attempts to perform these
187 operations; they are useful for targets that need to prevent
188 inadvertent disruption, such as in non-stop mode. */
189
190 int may_write_registers = 1;
191
192 int may_write_memory = 1;
193
194 int may_insert_breakpoints = 1;
195
196 int may_insert_tracepoints = 1;
197
198 int may_insert_fast_tracepoints = 1;
199
200 int may_stop = 1;
201
202 /* Non-zero if we want to see trace of target level stuff. */
203
204 static unsigned int targetdebug = 0;
/* Callback for "show debug target": report the current value of the
   target-debug knob.  */

static void
show_targetdebug (struct ui_file *outfile, int from_tty,
		  struct cmd_list_element *cmd, const char *value)
{
  fprintf_filtered (outfile, _("Target debugging is %s.\n"), value);
}
211
212 static void setup_target_debug (void);
213
214 /* The user just typed 'target' without the name of a target. */
215
216 static void
217 target_command (char *arg, int from_tty)
218 {
219 fputs_filtered ("Argument required (target name). Try `help target'\n",
220 gdb_stdout);
221 }
222
223 /* Default target_has_* methods for process_stratum targets. */
224
225 int
226 default_child_has_all_memory (struct target_ops *ops)
227 {
228 /* If no inferior selected, then we can't read memory here. */
229 if (ptid_equal (inferior_ptid, null_ptid))
230 return 0;
231
232 return 1;
233 }
234
235 int
236 default_child_has_memory (struct target_ops *ops)
237 {
238 /* If no inferior selected, then we can't read memory here. */
239 if (ptid_equal (inferior_ptid, null_ptid))
240 return 0;
241
242 return 1;
243 }
244
245 int
246 default_child_has_stack (struct target_ops *ops)
247 {
248 /* If no inferior selected, there's no stack. */
249 if (ptid_equal (inferior_ptid, null_ptid))
250 return 0;
251
252 return 1;
253 }
254
255 int
256 default_child_has_registers (struct target_ops *ops)
257 {
258 /* Can't read registers from no inferior. */
259 if (ptid_equal (inferior_ptid, null_ptid))
260 return 0;
261
262 return 1;
263 }
264
265 int
266 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
267 {
268 /* If there's no thread selected, then we can't make it run through
269 hoops. */
270 if (ptid_equal (the_ptid, null_ptid))
271 return 0;
272
273 return 1;
274 }
275
276
277 int
278 target_has_all_memory_1 (void)
279 {
280 struct target_ops *t;
281
282 for (t = current_target.beneath; t != NULL; t = t->beneath)
283 if (t->to_has_all_memory (t))
284 return 1;
285
286 return 0;
287 }
288
289 int
290 target_has_memory_1 (void)
291 {
292 struct target_ops *t;
293
294 for (t = current_target.beneath; t != NULL; t = t->beneath)
295 if (t->to_has_memory (t))
296 return 1;
297
298 return 0;
299 }
300
301 int
302 target_has_stack_1 (void)
303 {
304 struct target_ops *t;
305
306 for (t = current_target.beneath; t != NULL; t = t->beneath)
307 if (t->to_has_stack (t))
308 return 1;
309
310 return 0;
311 }
312
313 int
314 target_has_registers_1 (void)
315 {
316 struct target_ops *t;
317
318 for (t = current_target.beneath; t != NULL; t = t->beneath)
319 if (t->to_has_registers (t))
320 return 1;
321
322 return 0;
323 }
324
325 int
326 target_has_execution_1 (ptid_t the_ptid)
327 {
328 struct target_ops *t;
329
330 for (t = current_target.beneath; t != NULL; t = t->beneath)
331 if (t->to_has_execution (t, the_ptid))
332 return 1;
333
334 return 0;
335 }
336
337 int
338 target_has_execution_current (void)
339 {
340 return target_has_execution_1 (inferior_ptid);
341 }
342
343 /* Complete initialization of T. This ensures that various fields in
344 T are set, if needed by the target implementation. */
345
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The "has_foo" predicates conservatively default to "no".  The
     casts adapt return_zero's (void) signature to each slot's type;
     return_zero ignores its arguments, so this long-standing GDB
     idiom works in practice (NOTE(review): calling through a
     mismatched function-pointer type is technically undefined
     behavior — kept as-is to match the rest of this file).  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Install the delegator stubs generated in target-delegates.c for
     methods that use the delegation mechanism.  */
  install_delegators (t);
}
370
371 /* Add possible target architecture T to the list and add a new
372 command 'target T->to_shortname'. Set COMPLETER as the command's
373 completer if not NULL. */
374
375 void
376 add_target_with_completer (struct target_ops *t,
377 completer_ftype *completer)
378 {
379 struct cmd_list_element *c;
380
381 complete_target_initialization (t);
382
383 if (!target_structs)
384 {
385 target_struct_allocsize = DEFAULT_ALLOCSIZE;
386 target_structs = (struct target_ops **) xmalloc
387 (target_struct_allocsize * sizeof (*target_structs));
388 }
389 if (target_struct_size >= target_struct_allocsize)
390 {
391 target_struct_allocsize *= 2;
392 target_structs = (struct target_ops **)
393 xrealloc ((char *) target_structs,
394 target_struct_allocsize * sizeof (*target_structs));
395 }
396 target_structs[target_struct_size++] = t;
397
398 if (targetlist == NULL)
399 add_prefix_cmd ("target", class_run, target_command, _("\
400 Connect to a target machine or process.\n\
401 The first argument is the type or protocol of the target machine.\n\
402 Remaining arguments are interpreted by the target protocol. For more\n\
403 information on the arguments for a particular protocol, type\n\
404 `help target ' followed by the protocol name."),
405 &targetlist, "target ", 0, &cmdlist);
406 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
407 &targetlist);
408 if (completer != NULL)
409 set_cmd_completer (c, completer);
410 }
411
412 /* Add a possible target architecture to the list. */
413
414 void
415 add_target (struct target_ops *t)
416 {
417 add_target_with_completer (t, NULL);
418 }
419
420 /* See target.h. */
421
422 void
423 add_deprecated_target_alias (struct target_ops *t, char *alias)
424 {
425 struct cmd_list_element *c;
426 char *alt;
427
428 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
429 see PR cli/15104. */
430 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
431 alt = xstrprintf ("target %s", t->to_shortname);
432 deprecate_cmd (c, alt);
433 }
434
435 /* Stub functions */
436
/* Deliberate no-op, used as the default for optional target methods.  */

void
target_ignore (void)
{
}
441
442 void
443 target_kill (void)
444 {
445 struct target_ops *t;
446
447 for (t = current_target.beneath; t != NULL; t = t->beneath)
448 if (t->to_kill != NULL)
449 {
450 if (targetdebug)
451 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
452
453 t->to_kill (t);
454 return;
455 }
456
457 noprocess ();
458 }
459
460 void
461 target_load (char *arg, int from_tty)
462 {
463 target_dcache_invalidate ();
464 (*current_target.to_load) (arg, from_tty);
465 }
466
467 void
468 target_create_inferior (char *exec_file, char *args,
469 char **env, int from_tty)
470 {
471 struct target_ops *t;
472
473 for (t = current_target.beneath; t != NULL; t = t->beneath)
474 {
475 if (t->to_create_inferior != NULL)
476 {
477 t->to_create_inferior (t, exec_file, args, env, from_tty);
478 if (targetdebug)
479 fprintf_unfiltered (gdb_stdlog,
480 "target_create_inferior (%s, %s, xxx, %d)\n",
481 exec_file, args, from_tty);
482 return;
483 }
484 }
485
486 internal_error (__FILE__, __LINE__,
487 _("could not find a target to create inferior"));
488 }
489
490 void
491 target_terminal_inferior (void)
492 {
493 /* A background resume (``run&'') should leave GDB in control of the
494 terminal. Use target_can_async_p, not target_is_async_p, since at
495 this point the target is not async yet. However, if sync_execution
496 is not set, we know it will become async prior to resume. */
497 if (target_can_async_p () && !sync_execution)
498 return;
499
500 /* If GDB is resuming the inferior in the foreground, install
501 inferior's terminal modes. */
502 (*current_target.to_terminal_inferior) ();
503 }
504
505 static int
506 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
507 struct target_ops *t)
508 {
509 errno = EIO; /* Can't read/write this location. */
510 return 0; /* No bytes handled. */
511 }
512
513 static void
514 tcomplain (void)
515 {
516 error (_("You can't do that when your target is `%s'"),
517 current_target.to_shortname);
518 }
519
/* Error out because the requested operation needs a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
525
/* Default to_terminal_info method: nothing was ever saved.  */

static void
default_terminal_info (const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
531
532 /* A default implementation for the to_get_ada_task_ptid target method.
533
534 This function builds the PTID by using both LWP and TID as part of
535 the PTID lwp and tid elements. The pid used is the pid of the
536 inferior_ptid. */
537
538 static ptid_t
539 default_get_ada_task_ptid (long lwp, long tid)
540 {
541 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
542 }
543
544 static enum exec_direction_kind
545 default_execution_direction (void)
546 {
547 if (!target_can_execute_reverse)
548 return EXEC_FORWARD;
549 else if (!target_can_async_p ())
550 return EXEC_FORWARD;
551 else
552 gdb_assert_not_reached ("\
553 to_execution_direction must be implemented for reverse async");
554 }
555
556 /* Go through the target stack from top to bottom, copying over zero
557 entries in current_target, then filling in still empty entries. In
558 effect, we are doing class inheritance through the pushed target
559 vectors.
560
561 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
562 is currently implemented, is that it discards any knowledge of
563 which target an inherited method originally belonged to.
564 Consequently, new target methods should instead explicitly and
565 locally search the target stack for the target that can handle the
566 request. */
567
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy a method from T only if current_target does not have one
     yet; since we iterate from the top of the stack down, the topmost
     target that implements a method wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field) \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct target_ops *, struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (struct target_ops *, int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is special: a NULL value means "no
     description", so force it back to NULL rather than defaulting.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (void))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (void))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
956
957 /* Push a new target type into the stack of the existing target accessors,
958 possibly superseding some of the existing accessors.
959
960 Rather than allow an empty stack, we always have the dummy target at
961 the bottom stratum, so we can call the function vectors without
962 checking them. */
963
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: walk down the
     stack until we reach an entry whose stratum is no higher than
     T's.  CUR points at the link we will splice into.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  Unhook before closing so the
	 closed target is no longer reachable from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the modified stack.  */
  update_current_target ();
}
1007
1008 /* Remove a target_ops vector from the stack, wherever it may be.
1009 Return how many times it was removed (0 or 1). */
1010
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1049
1050 void
1051 pop_all_targets_above (enum strata above_stratum)
1052 {
1053 while ((int) (current_target.to_stratum) > (int) above_stratum)
1054 {
1055 if (!unpush_target (target_stack))
1056 {
1057 fprintf_unfiltered (gdb_stderr,
1058 "pop_all_targets couldn't find target %s\n",
1059 target_stack->to_shortname);
1060 internal_error (__FILE__, __LINE__,
1061 _("failed internal consistency check"));
1062 break;
1063 }
1064 }
1065 }
1066
/* Pop every target off the stack, down to (but not including) the
   ever-present dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1072
1073 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1074
1075 int
1076 target_is_pushed (struct target_ops *t)
1077 {
1078 struct target_ops **cur;
1079
1080 /* Check magic number. If wrong, it probably means someone changed
1081 the struct definition, but not all the places that initialize one. */
1082 if (t->to_magic != OPS_MAGIC)
1083 {
1084 fprintf_unfiltered (gdb_stderr,
1085 "Magic number of %s target struct wrong\n",
1086 t->to_shortname);
1087 internal_error (__FILE__, __LINE__,
1088 _("failed internal consistency check"));
1089 }
1090
1091 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1092 if (*cur == t)
1093 return 1;
1094
1095 return 0;
1096 }
1097
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Errors
   out (with a TLS-specific message where possible) if the address
   cannot be determined.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile so the value assigned inside TRY_CATCH survives the
     longjmp taken on error.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate a TLS
     offset for a given load module.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for some higher
		 catcher to handle.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1194
1195 const char *
1196 target_xfer_status_to_string (enum target_xfer_status err)
1197 {
1198 #define CASE(X) case X: return #X
1199 switch (err)
1200 {
1201 CASE(TARGET_XFER_E_IO);
1202 CASE(TARGET_XFER_E_UNAVAILABLE);
1203 default:
1204 return "<unknown>";
1205 }
1206 #undef CASE
1207 };
1208
1209
/* Local minimum-of-two helper used by target_read_string below;
   #undef first in case a system header already defined MIN.  */
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1212
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* Scratch for one aligned 4-byte chunk.  */
  int errcode = 0;
  char *buffer;			/* Accumulates the string; returned via *STRING.  */
  int buffer_allocated;
  char *bufptr;			/* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read up to the next 4-byte boundary, so each iteration fetches
	 one aligned word.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at (and including) the
	 terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Return whatever was read, even on error; *ERRNOP tells the caller
     whether the string is complete.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1285
1286 struct target_section_table *
1287 target_get_section_table (struct target_ops *target)
1288 {
1289 struct target_ops *t;
1290
1291 if (targetdebug)
1292 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1293
1294 for (t = target; t != NULL; t = t->beneath)
1295 if (t->to_get_section_table != NULL)
1296 return (*t->to_get_section_table) (t);
1297
1298 return NULL;
1299 }
1300
1301 /* Find a section containing ADDR. */
1302
1303 struct target_section *
1304 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1305 {
1306 struct target_section_table *table = target_get_section_table (target);
1307 struct target_section *secp;
1308
1309 if (table == NULL)
1310 return NULL;
1311
1312 for (secp = table->sections; secp < table->sections_end; secp++)
1313 {
1314 if (addr >= secp->addr && addr < secp->endaddr)
1315 return secp;
1316 }
1317 return NULL;
1318 }
1319
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* With traceframe selection suspended, this reads live memory.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  do_cleanups (cleanup);
  return ret;
}
1344
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lies in a section marked read-only.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section covering the start of the request; clip the
	 read so it does not run past that section's end.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1400
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

static enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
			 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, len,
				  xfered_len);
      if (res == TARGET_XFER_OK)
	break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_E_UNAVAILABLE)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      /* Fall through to the next target down the stack.  */
      ops = ops->beneath;
    }
  while (ops != NULL);

  /* On failure, RES holds the status from the last target tried.  */
  return res;
}
1435
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   executable sections, traceframe-available memory, the dcache, and
   finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request at the start of the first
		     available range beyond MEMADDR.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the memory region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1646
1647 /* Perform a partial memory transfer. For docs see target.h,
1648 to_xfer_partial. */
1649
1650 static enum target_xfer_status
1651 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1652 gdb_byte *readbuf, const gdb_byte *writebuf,
1653 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1654 {
1655 enum target_xfer_status res;
1656
1657 /* Zero length requests are ok and require no work. */
1658 if (len == 0)
1659 return TARGET_XFER_EOF;
1660
1661 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1662 breakpoint insns, thus hiding out from higher layers whether
1663 there are software breakpoints inserted in the code stream. */
1664 if (readbuf != NULL)
1665 {
1666 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1667 xfered_len);
1668
1669 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1670 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1671 }
1672 else
1673 {
1674 void *buf;
1675 struct cleanup *old_chain;
1676
1677 /* A large write request is likely to be partially satisfied
1678 by memory_xfer_partial_1. We will continually malloc
1679 and free a copy of the entire write request for breakpoint
1680 shadow handling even though we only end up writing a small
1681 subset of it. Cap writes to 4KB to mitigate this. */
1682 len = min (4096, len);
1683
1684 buf = xmalloc (len);
1685 old_chain = make_cleanup (xfree, buf);
1686 memcpy (buf, writebuf, len);
1687
1688 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1689 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1690 xfered_len);
1691
1692 do_cleanups (old_chain);
1693 }
1694
1695 return res;
1696 }
1697
1698 static void
1699 restore_show_memory_breakpoints (void *arg)
1700 {
1701 show_memory_breakpoints = (uintptr_t) arg;
1702 }
1703
1704 struct cleanup *
1705 make_show_memory_breakpoints_cleanup (int show)
1706 {
1707 int current = show_memory_breakpoints;
1708
1709 show_memory_breakpoints = show;
1710 return make_cleanup (restore_show_memory_breakpoints,
1711 (void *) (uintptr_t) current);
1712 }
1713
/* For docs see target.h, to_xfer_partial.  Entry point that routes
   memory objects through the memory-specific paths, logs the transfer
   when "set debug target" is on, and sanity-checks *XFERED_LEN.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes, 16 per line; abbreviate
	     after the first line unless "set debug target 2".  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1806
1807 /* Read LEN bytes of target memory at address MEMADDR, placing the
1808 results in GDB's memory at MYADDR. Returns either 0 for success or
1809 TARGET_XFER_E_IO if any error occurs.
1810
1811 If an error occurs, no guarantee is made about the contents of the data at
1812 MYADDR. In particular, the caller should not depend upon partial reads
1813 filling the buffer with good data. There is no way for the caller to know
1814 how much good data might have been transfered anyway. Callers that can
1815 deal with partial reads should call target_read (which will retry until
1816 it makes no progress, and then return how much was transferred). */
1817
1818 int
1819 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1820 {
1821 /* Dispatch to the topmost target, not the flattened current_target.
1822 Memory accesses check target->to_has_(all_)memory, and the
1823 flattened target doesn't inherit those. */
1824 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1825 myaddr, memaddr, len) == len)
1826 return 0;
1827 else
1828 return TARGET_XFER_E_IO;
1829 }
1830
1831 /* Like target_read_memory, but specify explicitly that this is a read
1832 from the target's raw memory. That is, this read bypasses the
1833 dcache, breakpoint shadowing, etc. */
1834
1835 int
1836 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1837 {
1838 /* See comment in target_read_memory about why the request starts at
1839 current_target.beneath. */
1840 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1841 myaddr, memaddr, len) == len)
1842 return 0;
1843 else
1844 return TARGET_XFER_E_IO;
1845 }
1846
1847 /* Like target_read_memory, but specify explicitly that this is a read from
1848 the target's stack. This may trigger different cache behavior. */
1849
1850 int
1851 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1852 {
1853 /* See comment in target_read_memory about why the request starts at
1854 current_target.beneath. */
1855 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1856 myaddr, memaddr, len) == len)
1857 return 0;
1858 else
1859 return TARGET_XFER_E_IO;
1860 }
1861
1862 /* Like target_read_memory, but specify explicitly that this is a read from
1863 the target's code. This may trigger different cache behavior. */
1864
1865 int
1866 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1867 {
1868 /* See comment in target_read_memory about why the request starts at
1869 current_target.beneath. */
1870 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1871 myaddr, memaddr, len) == len)
1872 return 0;
1873 else
1874 return TARGET_XFER_E_IO;
1875 }
1876
1877 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1878 Returns either 0 for success or TARGET_XFER_E_IO if any
1879 error occurs. If an error occurs, no guarantee is made about how
1880 much data got written. Callers that can deal with partial writes
1881 should call target_write. */
1882
1883 int
1884 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1885 {
1886 /* See comment in target_read_memory about why the request starts at
1887 current_target.beneath. */
1888 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1889 myaddr, memaddr, len) == len)
1890 return 0;
1891 else
1892 return TARGET_XFER_E_IO;
1893 }
1894
1895 /* Write LEN bytes from MYADDR to target raw memory at address
1896 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1897 if any error occurs. If an error occurs, no guarantee is made
1898 about how much data got written. Callers that can deal with
1899 partial writes should call target_write. */
1900
1901 int
1902 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1903 {
1904 /* See comment in target_read_memory about why the request starts at
1905 current_target.beneath. */
1906 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1907 myaddr, memaddr, len) == len)
1908 return 0;
1909 else
1910 return TARGET_XFER_E_IO;
1911 }
1912
/* Fetch the target's memory map.  Returns NULL if no target provides
   one, the map is empty, or the map contains overlapping regions.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Delegate to the first target on the stack that implements
     to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by ascending address so that the overlap check below only
     needs to compare adjacent regions.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1960
1961 void
1962 target_flash_erase (ULONGEST address, LONGEST length)
1963 {
1964 struct target_ops *t;
1965
1966 for (t = current_target.beneath; t != NULL; t = t->beneath)
1967 if (t->to_flash_erase != NULL)
1968 {
1969 if (targetdebug)
1970 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1971 hex_string (address), phex (length, 0));
1972 t->to_flash_erase (t, address, length);
1973 return;
1974 }
1975
1976 tcomplain ();
1977 }
1978
1979 void
1980 target_flash_done (void)
1981 {
1982 struct target_ops *t;
1983
1984 for (t = current_target.beneath; t != NULL; t = t->beneath)
1985 if (t->to_flash_done != NULL)
1986 {
1987 if (targetdebug)
1988 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1989 t->to_flash_done (t);
1990 return;
1991 }
1992
1993 tcomplain ();
1994 }
1995
/* "show trust-readonly-sections" command callback: report the current
   setting VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2004
2005 /* More generic transfers. */
2006
2007 static enum target_xfer_status
2008 default_xfer_partial (struct target_ops *ops, enum target_object object,
2009 const char *annex, gdb_byte *readbuf,
2010 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2011 ULONGEST *xfered_len)
2012 {
2013 if (object == TARGET_OBJECT_MEMORY
2014 && ops->deprecated_xfer_memory != NULL)
2015 /* If available, fall back to the target's
2016 "deprecated_xfer_memory" method. */
2017 {
2018 int xfered = -1;
2019
2020 errno = 0;
2021 if (writebuf != NULL)
2022 {
2023 void *buffer = xmalloc (len);
2024 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2025
2026 memcpy (buffer, writebuf, len);
2027 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2028 1/*write*/, NULL, ops);
2029 do_cleanups (cleanup);
2030 }
2031 if (readbuf != NULL)
2032 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2033 0/*read*/, NULL, ops);
2034 if (xfered > 0)
2035 {
2036 *xfered_len = (ULONGEST) xfered;
2037 return TARGET_XFER_E_IO;
2038 }
2039 else if (xfered == 0 && errno == 0)
2040 /* "deprecated_xfer_memory" uses 0, cross checked against
2041 ERRNO as one indication of an error. */
2042 return TARGET_XFER_EOF;
2043 else
2044 return TARGET_XFER_E_IO;
2045 }
2046 else
2047 {
2048 gdb_assert (ops->beneath != NULL);
2049 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2050 readbuf, writebuf, offset, len,
2051 xfered_len);
2052 }
2053 }
2054
2055 /* Target vector read/write partial wrapper functions. */
2056
/* Read wrapper: a partial transfer with only a read buffer (no write
   side).  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2067
/* Write wrapper: a partial transfer with only a write buffer (no read
   side).  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
2077
2078 /* Wrappers to perform the full transfer. */
2079
2080 /* For docs on target_read see target.h. */
2081
LONGEST
target_read (struct target_ops *ops,
	     enum target_object object,
	     const char *annex, gdb_byte *buf,
	     ULONGEST offset, LONGEST len)
{
  LONGEST xfered = 0;

  /* Issue partial transfers until LEN bytes have been moved, EOF is
     reached (return the shorter count), or an error occurs (return
     -1).  */
  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
				    (gdb_byte *) buf + xfered,
				    offset + xfered, len - xfered,
				    &xfered_len);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
	return xfered;
      else if (status == TARGET_XFER_OK)
	{
	  xfered += xfered_len;
	  QUIT;		/* Allow the user to interrupt a long transfer.  */
	}
      else
	return -1;

    }
  return len;
}
2114
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  /* Scratch buffer sized for the whole candidate range; on the
     "forward" path its ownership is transferred into *RESULT.  */
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* First byte readable: hunt forward for the end of the readable
	 prefix.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* Last byte readable: hunt backward for the start of the
	 readable suffix.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither end is readable; give up.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the part already
	 known readable, so it is the one tried first.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF already
	 holds exactly that prefix, so hand it over wholesale.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 suffix out of BUF into a right-sized block.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2249
2250 void
2251 free_memory_read_result_vector (void *x)
2252 {
2253 VEC(memory_read_result_s) *v = x;
2254 memory_read_result_s *current;
2255 int ix;
2256
2257 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2258 {
2259 xfree (current->data);
2260 }
2261 VEC_free (memory_read_result_s, v);
2262 }
2263
2264 VEC(memory_read_result_s) *
2265 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2266 {
2267 VEC(memory_read_result_s) *result = 0;
2268
2269 LONGEST xfered = 0;
2270 while (xfered < len)
2271 {
2272 struct mem_region *region = lookup_mem_region (offset + xfered);
2273 LONGEST rlen;
2274
2275 /* If there is no explicit region, a fake one should be created. */
2276 gdb_assert (region);
2277
2278 if (region->hi == 0)
2279 rlen = len - xfered;
2280 else
2281 rlen = region->hi - offset;
2282
2283 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2284 {
2285 /* Cannot read this region. Note that we can end up here only
2286 if the region is explicitly marked inaccessible, or
2287 'inaccessible-by-default' is in effect. */
2288 xfered += rlen;
2289 }
2290 else
2291 {
2292 LONGEST to_read = min (len - xfered, rlen);
2293 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2294
2295 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2296 (gdb_byte *) buffer,
2297 offset + xfered, to_read);
2298 /* Call an observer, notifying them of the xfer progress? */
2299 if (xfer <= 0)
2300 {
2301 /* Got an error reading full chunk. See if maybe we can read
2302 some subrange. */
2303 xfree (buffer);
2304 read_whatever_is_readable (ops, offset + xfered,
2305 offset + xfered + to_read, &result);
2306 xfered += to_read;
2307 }
2308 else
2309 {
2310 struct memory_read_result r;
2311 r.data = buffer;
2312 r.begin = offset + xfered;
2313 r.end = r.begin + xfer;
2314 VEC_safe_push (memory_read_result_s, result, &r);
2315 xfered += xfer;
2316 }
2317 QUIT;
2318 }
2319 }
2320 return result;
2321 }
2322
2323
2324 /* An alternative to target_write with progress callbacks. */
2325
2326 LONGEST
2327 target_write_with_progress (struct target_ops *ops,
2328 enum target_object object,
2329 const char *annex, const gdb_byte *buf,
2330 ULONGEST offset, LONGEST len,
2331 void (*progress) (ULONGEST, void *), void *baton)
2332 {
2333 LONGEST xfered = 0;
2334
2335 /* Give the progress callback a chance to set up. */
2336 if (progress)
2337 (*progress) (0, baton);
2338
2339 while (xfered < len)
2340 {
2341 ULONGEST xfered_len;
2342 enum target_xfer_status status;
2343
2344 status = target_write_partial (ops, object, annex,
2345 (gdb_byte *) buf + xfered,
2346 offset + xfered, len - xfered,
2347 &xfered_len);
2348
2349 if (status == TARGET_XFER_EOF)
2350 return xfered;
2351 if (TARGET_XFER_STATUS_ERROR_P (status))
2352 return -1;
2353
2354 gdb_assert (status == TARGET_XFER_OK);
2355 if (progress)
2356 (*progress) (xfered_len, baton);
2357
2358 xfered += xfered_len;
2359 QUIT;
2360 }
2361 return len;
2362 }
2363
2364 /* For docs on target_write see target.h. */
2365
2366 LONGEST
2367 target_write (struct target_ops *ops,
2368 enum target_object object,
2369 const char *annex, const gdb_byte *buf,
2370 ULONGEST offset, LONGEST len)
2371 {
2372 return target_write_with_progress (ops, object, annex, buf, offset, len,
2373 NULL, NULL);
2374 }
2375
2376 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2377 the size of the transferred data. PADDING additional bytes are
2378 available in *BUF_P. This is a helper function for
2379 target_read_alloc; see the declaration of that function for more
2380 information. */
2381
2382 static LONGEST
2383 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2384 const char *annex, gdb_byte **buf_p, int padding)
2385 {
2386 size_t buf_alloc, buf_pos;
2387 gdb_byte *buf;
2388
2389 /* This function does not have a length parameter; it reads the
2390 entire OBJECT). Also, it doesn't support objects fetched partly
2391 from one target and partly from another (in a different stratum,
2392 e.g. a core file and an executable). Both reasons make it
2393 unsuitable for reading memory. */
2394 gdb_assert (object != TARGET_OBJECT_MEMORY);
2395
2396 /* Start by reading up to 4K at a time. The target will throttle
2397 this number down if necessary. */
2398 buf_alloc = 4096;
2399 buf = xmalloc (buf_alloc);
2400 buf_pos = 0;
2401 while (1)
2402 {
2403 ULONGEST xfered_len;
2404 enum target_xfer_status status;
2405
2406 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2407 buf_pos, buf_alloc - buf_pos - padding,
2408 &xfered_len);
2409
2410 if (status == TARGET_XFER_EOF)
2411 {
2412 /* Read all there was. */
2413 if (buf_pos == 0)
2414 xfree (buf);
2415 else
2416 *buf_p = buf;
2417 return buf_pos;
2418 }
2419 else if (status != TARGET_XFER_OK)
2420 {
2421 /* An error occurred. */
2422 xfree (buf);
2423 return TARGET_XFER_E_IO;
2424 }
2425
2426 buf_pos += xfered_len;
2427
2428 /* If the buffer is filling up, expand it. */
2429 if (buf_alloc < buf_pos * 2)
2430 {
2431 buf_alloc *= 2;
2432 buf = xrealloc (buf, buf_alloc);
2433 }
2434
2435 QUIT;
2436 }
2437 }
2438
2439 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2440 the size of the transferred data. See the declaration in "target.h"
2441 function for more information about the return value. */
2442
2443 LONGEST
2444 target_read_alloc (struct target_ops *ops, enum target_object object,
2445 const char *annex, gdb_byte **buf_p)
2446 {
2447 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2448 }
2449
2450 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2451 returned as a string, allocated using xmalloc. If an error occurs
2452 or the transfer is unsupported, NULL is returned. Empty objects
2453 are returned as allocated but empty strings. A warning is issued
2454 if the result contains any embedded NUL bytes. */
2455
2456 char *
2457 target_read_stralloc (struct target_ops *ops, enum target_object object,
2458 const char *annex)
2459 {
2460 gdb_byte *buffer;
2461 char *bufstr;
2462 LONGEST i, transferred;
2463
2464 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2465 bufstr = (char *) buffer;
2466
2467 if (transferred < 0)
2468 return NULL;
2469
2470 if (transferred == 0)
2471 return xstrdup ("");
2472
2473 bufstr[transferred] = 0;
2474
2475 /* Check for embedded NUL bytes; but allow trailing NULs. */
2476 for (i = strlen (bufstr); i < transferred; i++)
2477 if (bufstr[i] != 0)
2478 {
2479 warning (_("target object %d, annex %s, "
2480 "contained unexpected null characters"),
2481 (int) object, annex ? annex : "(none)");
2482 break;
2483 }
2484
2485 return bufstr;
2486 }
2487
2488 /* Memory transfer methods. */
2489
2490 void
2491 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2492 LONGEST len)
2493 {
2494 /* This method is used to read from an alternate, non-current
2495 target. This read must bypass the overlay support (as symbols
2496 don't match this target), and GDB's internal cache (wrong cache
2497 for this target). */
2498 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2499 != len)
2500 memory_error (TARGET_XFER_E_IO, addr);
2501 }
2502
2503 ULONGEST
2504 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2505 int len, enum bfd_endian byte_order)
2506 {
2507 gdb_byte buf[sizeof (ULONGEST)];
2508
2509 gdb_assert (len <= sizeof (buf));
2510 get_target_memory (ops, addr, buf, len);
2511 return extract_unsigned_integer (buf, len, byte_order);
2512 }
2513
2514 /* See target.h. */
2515
2516 int
2517 target_insert_breakpoint (struct gdbarch *gdbarch,
2518 struct bp_target_info *bp_tgt)
2519 {
2520 if (!may_insert_breakpoints)
2521 {
2522 warning (_("May not insert breakpoints"));
2523 return 1;
2524 }
2525
2526 return current_target.to_insert_breakpoint (&current_target,
2527 gdbarch, bp_tgt);
2528 }
2529
2530 /* See target.h. */
2531
2532 int
2533 target_remove_breakpoint (struct gdbarch *gdbarch,
2534 struct bp_target_info *bp_tgt)
2535 {
2536 /* This is kind of a weird case to handle, but the permission might
2537 have been changed after breakpoints were inserted - in which case
2538 we should just take the user literally and assume that any
2539 breakpoints should be left in place. */
2540 if (!may_insert_breakpoints)
2541 {
2542 warning (_("May not remove breakpoints"));
2543 return 1;
2544 }
2545
2546 return current_target.to_remove_breakpoint (&current_target,
2547 gdbarch, bp_tgt);
2548 }
2549
2550 static void
2551 target_info (char *args, int from_tty)
2552 {
2553 struct target_ops *t;
2554 int has_all_mem = 0;
2555
2556 if (symfile_objfile != NULL)
2557 printf_unfiltered (_("Symbols from \"%s\".\n"),
2558 objfile_name (symfile_objfile));
2559
2560 for (t = target_stack; t != NULL; t = t->beneath)
2561 {
2562 if (!(*t->to_has_memory) (t))
2563 continue;
2564
2565 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2566 continue;
2567 if (has_all_mem)
2568 printf_unfiltered (_("\tWhile running this, "
2569 "GDB does not access memory from...\n"));
2570 printf_unfiltered ("%s:\n", t->to_longname);
2571 (t->to_files_info) (t);
2572 has_all_mem = (*t->to_has_all_memory) (t);
2573 }
2574 }
2575
2576 /* This function is called before any new inferior is created, e.g.
2577 by running a program, attaching, or connecting to a target.
2578 It cleans up any state from previous invocations which might
2579 change between runs. This is a subset of what target_preopen
2580 resets (things which might change between targets). */
2581
2582 void
2583 target_pre_inferior (int from_tty)
2584 {
2585 /* Clear out solib state. Otherwise the solib state of the previous
2586 inferior might have survived and is entirely wrong for the new
2587 target. This has been observed on GNU/Linux using glibc 2.3. How
2588 to reproduce:
2589
2590 bash$ ./foo&
2591 [1] 4711
2592 bash$ ./foo&
2593 [1] 4712
2594 bash$ gdb ./foo
2595 [...]
2596 (gdb) attach 4711
2597 (gdb) detach
2598 (gdb) attach 4712
2599 Cannot access memory at address 0xdeadbeef
2600 */
2601
2602 /* In some OSs, the shared library list is the same/global/shared
2603 across inferiors. If code is shared between processes, so are
2604 memory regions and features. */
2605 if (!gdbarch_has_global_solist (target_gdbarch ()))
2606 {
2607 no_shared_libraries (NULL, from_tty);
2608
2609 invalidate_target_mem_regions ();
2610
2611 target_clear_description ();
2612 }
2613
2614 agent_capability_invalidate ();
2615 }
2616
2617 /* Callback for iterate_over_inferiors. Gets rid of the given
2618 inferior. */
2619
2620 static int
2621 dispose_inferior (struct inferior *inf, void *args)
2622 {
2623 struct thread_info *thread;
2624
2625 thread = any_thread_of_process (inf->pid);
2626 if (thread)
2627 {
2628 switch_to_thread (thread->ptid);
2629
2630 /* Core inferiors actually should be detached, not killed. */
2631 if (target_has_execution)
2632 target_kill ();
2633 else
2634 target_detach (NULL, 0);
2635 }
2636
2637 return 0;
2638 }
2639
2640 /* This is to be called by the open routine before it does
2641 anything. */
2642
2643 void
2644 target_preopen (int from_tty)
2645 {
2646 dont_repeat ();
2647
2648 if (have_inferiors ())
2649 {
2650 if (!from_tty
2651 || !have_live_inferiors ()
2652 || query (_("A program is being debugged already. Kill it? ")))
2653 iterate_over_inferiors (dispose_inferior, NULL);
2654 else
2655 error (_("Program not killed."));
2656 }
2657
2658 /* Calling target_kill may remove the target from the stack. But if
2659 it doesn't (which seems like a win for UDI), remove it now. */
2660 /* Leave the exec target, though. The user may be switching from a
2661 live process to a core of the same program. */
2662 pop_all_targets_above (file_stratum);
2663
2664 target_pre_inferior (from_tty);
2665 }
2666
2667 /* Detach a target after doing deferred register stores. */
2668
2669 void
2670 target_detach (const char *args, int from_tty)
2671 {
2672 struct target_ops* t;
2673
2674 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2675 /* Don't remove global breakpoints here. They're removed on
2676 disconnection from the target. */
2677 ;
2678 else
2679 /* If we're in breakpoints-always-inserted mode, have to remove
2680 them before detaching. */
2681 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2682
2683 prepare_for_detach ();
2684
2685 for (t = current_target.beneath; t != NULL; t = t->beneath)
2686 {
2687 if (t->to_detach != NULL)
2688 {
2689 t->to_detach (t, args, from_tty);
2690 if (targetdebug)
2691 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2692 args, from_tty);
2693 return;
2694 }
2695 }
2696
2697 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2698 }
2699
2700 void
2701 target_disconnect (char *args, int from_tty)
2702 {
2703 struct target_ops *t;
2704
2705 /* If we're in breakpoints-always-inserted mode or if breakpoints
2706 are global across processes, we have to remove them before
2707 disconnecting. */
2708 remove_breakpoints ();
2709
2710 for (t = current_target.beneath; t != NULL; t = t->beneath)
2711 if (t->to_disconnect != NULL)
2712 {
2713 if (targetdebug)
2714 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2715 args, from_tty);
2716 t->to_disconnect (t, args, from_tty);
2717 return;
2718 }
2719
2720 tcomplain ();
2721 }
2722
2723 ptid_t
2724 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2725 {
2726 struct target_ops *t;
2727 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2728 status, options);
2729
2730 if (targetdebug)
2731 {
2732 char *status_string;
2733 char *options_string;
2734
2735 status_string = target_waitstatus_to_string (status);
2736 options_string = target_options_to_string (options);
2737 fprintf_unfiltered (gdb_stdlog,
2738 "target_wait (%d, status, options={%s})"
2739 " = %d, %s\n",
2740 ptid_get_pid (ptid), options_string,
2741 ptid_get_pid (retval), status_string);
2742 xfree (status_string);
2743 xfree (options_string);
2744 }
2745
2746 return retval;
2747 }
2748
2749 char *
2750 target_pid_to_str (ptid_t ptid)
2751 {
2752 struct target_ops *t;
2753
2754 for (t = current_target.beneath; t != NULL; t = t->beneath)
2755 {
2756 if (t->to_pid_to_str != NULL)
2757 return (*t->to_pid_to_str) (t, ptid);
2758 }
2759
2760 return normal_pid_to_str (ptid);
2761 }
2762
2763 char *
2764 target_thread_name (struct thread_info *info)
2765 {
2766 struct target_ops *t;
2767
2768 for (t = current_target.beneath; t != NULL; t = t->beneath)
2769 {
2770 if (t->to_thread_name != NULL)
2771 return (*t->to_thread_name) (info);
2772 }
2773
2774 return NULL;
2775 }
2776
2777 void
2778 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2779 {
2780 struct target_ops *t;
2781
2782 target_dcache_invalidate ();
2783
2784 current_target.to_resume (&current_target, ptid, step, signal);
2785 if (targetdebug)
2786 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2787 ptid_get_pid (ptid),
2788 step ? "step" : "continue",
2789 gdb_signal_to_name (signal));
2790
2791 registers_changed_ptid (ptid);
2792 set_executing (ptid, 1);
2793 set_running (ptid, 1);
2794 clear_inline_frame_state (ptid);
2795 }
2796
2797 void
2798 target_pass_signals (int numsigs, unsigned char *pass_signals)
2799 {
2800 struct target_ops *t;
2801
2802 for (t = current_target.beneath; t != NULL; t = t->beneath)
2803 {
2804 if (t->to_pass_signals != NULL)
2805 {
2806 if (targetdebug)
2807 {
2808 int i;
2809
2810 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2811 numsigs);
2812
2813 for (i = 0; i < numsigs; i++)
2814 if (pass_signals[i])
2815 fprintf_unfiltered (gdb_stdlog, " %s",
2816 gdb_signal_to_name (i));
2817
2818 fprintf_unfiltered (gdb_stdlog, " })\n");
2819 }
2820
2821 (*t->to_pass_signals) (numsigs, pass_signals);
2822 return;
2823 }
2824 }
2825 }
2826
2827 void
2828 target_program_signals (int numsigs, unsigned char *program_signals)
2829 {
2830 struct target_ops *t;
2831
2832 for (t = current_target.beneath; t != NULL; t = t->beneath)
2833 {
2834 if (t->to_program_signals != NULL)
2835 {
2836 if (targetdebug)
2837 {
2838 int i;
2839
2840 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2841 numsigs);
2842
2843 for (i = 0; i < numsigs; i++)
2844 if (program_signals[i])
2845 fprintf_unfiltered (gdb_stdlog, " %s",
2846 gdb_signal_to_name (i));
2847
2848 fprintf_unfiltered (gdb_stdlog, " })\n");
2849 }
2850
2851 (*t->to_program_signals) (numsigs, program_signals);
2852 return;
2853 }
2854 }
2855 }
2856
2857 /* Look through the list of possible targets for a target that can
2858 follow forks. */
2859
2860 int
2861 target_follow_fork (int follow_child, int detach_fork)
2862 {
2863 struct target_ops *t;
2864
2865 for (t = current_target.beneath; t != NULL; t = t->beneath)
2866 {
2867 if (t->to_follow_fork != NULL)
2868 {
2869 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2870
2871 if (targetdebug)
2872 fprintf_unfiltered (gdb_stdlog,
2873 "target_follow_fork (%d, %d) = %d\n",
2874 follow_child, detach_fork, retval);
2875 return retval;
2876 }
2877 }
2878
2879 /* Some target returned a fork event, but did not know how to follow it. */
2880 internal_error (__FILE__, __LINE__,
2881 _("could not find a target to follow fork"));
2882 }
2883
2884 void
2885 target_mourn_inferior (void)
2886 {
2887 struct target_ops *t;
2888
2889 for (t = current_target.beneath; t != NULL; t = t->beneath)
2890 {
2891 if (t->to_mourn_inferior != NULL)
2892 {
2893 t->to_mourn_inferior (t);
2894 if (targetdebug)
2895 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2896
2897 /* We no longer need to keep handles on any of the object files.
2898 Make sure to release them to avoid unnecessarily locking any
2899 of them while we're not actually debugging. */
2900 bfd_cache_close_all ();
2901
2902 return;
2903 }
2904 }
2905
2906 internal_error (__FILE__, __LINE__,
2907 _("could not find a target to follow mourn inferior"));
2908 }
2909
2910 /* Look for a target which can describe architectural features, starting
2911 from TARGET. If we find one, return its description. */
2912
2913 const struct target_desc *
2914 target_read_description (struct target_ops *target)
2915 {
2916 struct target_ops *t;
2917
2918 for (t = target; t != NULL; t = t->beneath)
2919 if (t->to_read_description != NULL)
2920 {
2921 const struct target_desc *tdesc;
2922
2923 tdesc = t->to_read_description (t);
2924 if (tdesc)
2925 return tdesc;
2926 }
2927
2928 return NULL;
2929 }
2930
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  It is sized one
     chunk plus pattern_len - 1 bytes, so that a match straddling two
     chunks is still found within a single memmem call.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so an oversized request fails with the
     message below instead of aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3038
3039 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3040 sequence of bytes in PATTERN with length PATTERN_LEN.
3041
3042 The result is 1 if found, 0 if not found, and -1 if there was an error
3043 requiring halting of the search (e.g. memory read error).
3044 If the pattern is found the address is recorded in FOUND_ADDRP. */
3045
3046 int
3047 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3048 const gdb_byte *pattern, ULONGEST pattern_len,
3049 CORE_ADDR *found_addrp)
3050 {
3051 struct target_ops *t;
3052 int found;
3053
3054 /* We don't use INHERIT to set current_target.to_search_memory,
3055 so we have to scan the target stack and handle targetdebug
3056 ourselves. */
3057
3058 if (targetdebug)
3059 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3060 hex_string (start_addr));
3061
3062 for (t = current_target.beneath; t != NULL; t = t->beneath)
3063 if (t->to_search_memory != NULL)
3064 break;
3065
3066 if (t != NULL)
3067 {
3068 found = t->to_search_memory (t, start_addr, search_space_len,
3069 pattern, pattern_len, found_addrp);
3070 }
3071 else
3072 {
3073 /* If a special version of to_search_memory isn't available, use the
3074 simple version. */
3075 found = simple_search_memory (current_target.beneath,
3076 start_addr, search_space_len,
3077 pattern, pattern_len, found_addrp);
3078 }
3079
3080 if (targetdebug)
3081 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3082
3083 return found;
3084 }
3085
3086 /* Look through the currently pushed targets. If none of them will
3087 be able to restart the currently running process, issue an error
3088 message. */
3089
3090 void
3091 target_require_runnable (void)
3092 {
3093 struct target_ops *t;
3094
3095 for (t = target_stack; t != NULL; t = t->beneath)
3096 {
3097 /* If this target knows how to create a new program, then
3098 assume we will still be able to after killing the current
3099 one. Either killing and mourning will not pop T, or else
3100 find_default_run_target will find it again. */
3101 if (t->to_create_inferior != NULL)
3102 return;
3103
3104 /* Do not worry about thread_stratum targets that can not
3105 create inferiors. Assume they will be pushed again if
3106 necessary, and continue to the process_stratum. */
3107 if (t->to_stratum == thread_stratum
3108 || t->to_stratum == arch_stratum)
3109 continue;
3110
3111 error (_("The \"%s\" target does not support \"run\". "
3112 "Try \"help target\" or \"continue\"."),
3113 t->to_shortname);
3114 }
3115
3116 /* This function is only called if the target is running. In that
3117 case there should have been a process_stratum target and it
3118 should either know how to create inferiors, or not... */
3119 internal_error (__FILE__, __LINE__, _("No targets found"));
3120 }
3121
3122 /* Look through the list of possible targets for a target that can
3123 execute a run or attach command without any other data. This is
3124 used to locate the default process stratum.
3125
3126 If DO_MESG is not NULL, the result is always valid (error() is
3127 called for errors); else, return NULL on error. */
3128
3129 static struct target_ops *
3130 find_default_run_target (char *do_mesg)
3131 {
3132 struct target_ops **t;
3133 struct target_ops *runable = NULL;
3134 int count;
3135
3136 count = 0;
3137
3138 for (t = target_structs; t < target_structs + target_struct_size;
3139 ++t)
3140 {
3141 if ((*t)->to_can_run && target_can_run (*t))
3142 {
3143 runable = *t;
3144 ++count;
3145 }
3146 }
3147
3148 if (count != 1)
3149 {
3150 if (do_mesg)
3151 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3152 else
3153 return NULL;
3154 }
3155
3156 return runable;
3157 }
3158
3159 void
3160 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3161 {
3162 struct target_ops *t;
3163
3164 t = find_default_run_target ("attach");
3165 (t->to_attach) (t, args, from_tty);
3166 return;
3167 }
3168
3169 void
3170 find_default_create_inferior (struct target_ops *ops,
3171 char *exec_file, char *allargs, char **env,
3172 int from_tty)
3173 {
3174 struct target_ops *t;
3175
3176 t = find_default_run_target ("run");
3177 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3178 return;
3179 }
3180
3181 static int
3182 find_default_can_async_p (struct target_ops *ignore)
3183 {
3184 struct target_ops *t;
3185
3186 /* This may be called before the target is pushed on the stack;
3187 look for the default process stratum. If there's none, gdb isn't
3188 configured with a native debugger, and target remote isn't
3189 connected yet. */
3190 t = find_default_run_target (NULL);
3191 if (t && t->to_can_async_p != delegate_can_async_p)
3192 return (t->to_can_async_p) (t);
3193 return 0;
3194 }
3195
3196 static int
3197 find_default_is_async_p (struct target_ops *ignore)
3198 {
3199 struct target_ops *t;
3200
3201 /* This may be called before the target is pushed on the stack;
3202 look for the default process stratum. If there's none, gdb isn't
3203 configured with a native debugger, and target remote isn't
3204 connected yet. */
3205 t = find_default_run_target (NULL);
3206 if (t && t->to_is_async_p != delegate_is_async_p)
3207 return (t->to_is_async_p) (t);
3208 return 0;
3209 }
3210
3211 static int
3212 find_default_supports_non_stop (void)
3213 {
3214 struct target_ops *t;
3215
3216 t = find_default_run_target (NULL);
3217 if (t && t->to_supports_non_stop)
3218 return (t->to_supports_non_stop) ();
3219 return 0;
3220 }
3221
3222 int
3223 target_supports_non_stop (void)
3224 {
3225 struct target_ops *t;
3226
3227 for (t = &current_target; t != NULL; t = t->beneath)
3228 if (t->to_supports_non_stop)
3229 return t->to_supports_non_stop ();
3230
3231 return 0;
3232 }
3233
/* Implement the "info proc" command.  Returns 1 if some target
   handled the request, 0 if none did.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Delegate to the first target on the stack implementing the
     method.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  /* NOTE(review): ARGS may be NULL here ("info proc" with no
	     argument) — confirm fprintf_unfiltered tolerates a NULL
	     "%s" argument.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
3265
3266 static int
3267 find_default_supports_disable_randomization (void)
3268 {
3269 struct target_ops *t;
3270
3271 t = find_default_run_target (NULL);
3272 if (t && t->to_supports_disable_randomization)
3273 return (t->to_supports_disable_randomization) ();
3274 return 0;
3275 }
3276
3277 int
3278 target_supports_disable_randomization (void)
3279 {
3280 struct target_ops *t;
3281
3282 for (t = &current_target; t != NULL; t = t->beneath)
3283 if (t->to_supports_disable_randomization)
3284 return t->to_supports_disable_randomization ();
3285
3286 return 0;
3287 }
3288
3289 char *
3290 target_get_osdata (const char *type)
3291 {
3292 struct target_ops *t;
3293
3294 /* If we're already connected to something that can get us OS
3295 related data, use it. Otherwise, try using the native
3296 target. */
3297 if (current_target.to_stratum >= process_stratum)
3298 t = current_target.beneath;
3299 else
3300 t = find_default_run_target ("get OS data");
3301
3302 if (!t)
3303 return NULL;
3304
3305 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3306 }
3307
/* Determine the current address space of thread PTID.  Never returns
   NULL: either a target answers, the inferior's main address space is
   used, or we raise an internal error.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target on the stack that can map a thread to an
     address space.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3344
3345
3346 /* Target file operations. */
3347
3348 static struct target_ops *
3349 default_fileio_target (void)
3350 {
3351 /* If we're already connected to something that can perform
3352 file I/O, use it. Otherwise, try using the native target. */
3353 if (current_target.to_stratum >= process_stratum)
3354 return current_target.beneath;
3355 else
3356 return find_default_run_target ("file I/O");
3357 }
3358
3359 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3360 target file descriptor, or -1 if an error occurs (and set
3361 *TARGET_ERRNO). */
3362 int
3363 target_fileio_open (const char *filename, int flags, int mode,
3364 int *target_errno)
3365 {
3366 struct target_ops *t;
3367
3368 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3369 {
3370 if (t->to_fileio_open != NULL)
3371 {
3372 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3373
3374 if (targetdebug)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3377 filename, flags, mode,
3378 fd, fd != -1 ? 0 : *target_errno);
3379 return fd;
3380 }
3381 }
3382
3383 *target_errno = FILEIO_ENOSYS;
3384 return -1;
3385 }
3386
/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3416
/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3446
3447 /* Close FD on the target. Return 0, or -1 if an error occurs
3448 (and set *TARGET_ERRNO). */
3449 int
3450 target_fileio_close (int fd, int *target_errno)
3451 {
3452 struct target_ops *t;
3453
3454 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3455 {
3456 if (t->to_fileio_close != NULL)
3457 {
3458 int ret = t->to_fileio_close (fd, target_errno);
3459
3460 if (targetdebug)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "target_fileio_close (%d) = %d (%d)\n",
3463 fd, ret, ret != -1 ? 0 : *target_errno);
3464 return ret;
3465 }
3466 }
3467
3468 *target_errno = FILEIO_ENOSYS;
3469 return -1;
3470 }
3471
3472 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3473 occurs (and set *TARGET_ERRNO). */
3474 int
3475 target_fileio_unlink (const char *filename, int *target_errno)
3476 {
3477 struct target_ops *t;
3478
3479 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3480 {
3481 if (t->to_fileio_unlink != NULL)
3482 {
3483 int ret = t->to_fileio_unlink (filename, target_errno);
3484
3485 if (targetdebug)
3486 fprintf_unfiltered (gdb_stdlog,
3487 "target_fileio_unlink (%s) = %d (%d)\n",
3488 filename, ret, ret != -1 ? 0 : *target_errno);
3489 return ret;
3490 }
3491 }
3492
3493 *target_errno = FILEIO_ENOSYS;
3494 return -1;
3495 }
3496
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  Caller owns and must free the
   returned string.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3523
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, ignoring any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3532
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On success the caller owns *BUF_P (xmalloc'd); on failure or on an
   empty file *BUF_P is not set and the buffer is freed here.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the unused tail of BUF, always keeping PADDING
	 spare bytes at the end for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3596
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding byte needed for raw data.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3606
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the terminating NUL below always
     fits.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3643
3644
3645 static int
3646 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3647 CORE_ADDR addr, int len)
3648 {
3649 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3650 }
3651
3652 static int
3653 default_watchpoint_addr_within_range (struct target_ops *target,
3654 CORE_ADDR addr,
3655 CORE_ADDR start, int length)
3656 {
3657 return addr >= start && addr < start + length;
3658 }
3659
/* Default implementation of to_thread_architecture: every thread uses
   the main target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3665
/* Stub method that always answers 0; installed in the dummy target
   vector through function-pointer casts (see init_dummy_target).  */

static int
return_zero (void)
{
  return 0;
}
3671
/* Stub method that always answers 1; counterpart of return_zero.  */

static int
return_one (void)
{
  return 1;
}
3677
/* Stub method that always answers -1 (conventional "not supported"
   value for several target methods).  */

static int
return_minus_one (void)
{
  return -1;
}
3683
/* Stub method that always answers a null pointer.  */

static void *
return_null (void)
{
  /* Use NULL rather than the bare integer constant 0 for the null
     pointer, per convention for pointer-valued expressions.  */
  return NULL;
}
3689
3690 /*
3691 * Find the next target down the stack from the specified target.
3692 */
3693
3694 struct target_ops *
3695 find_target_beneath (struct target_ops *t)
3696 {
3697 return t->beneath;
3698 }
3699
3700 /* See target.h. */
3701
3702 struct target_ops *
3703 find_target_at (enum strata stratum)
3704 {
3705 struct target_ops *t;
3706
3707 for (t = current_target.beneath; t != NULL; t = t->beneath)
3708 if (t->to_stratum == stratum)
3709 return t;
3710
3711 return NULL;
3712 }
3713
3714 \f
/* The inferior process has died.  Long live the inferior!

   Generic teardown shared by targets: clears inferior_ptid, marks
   breakpoints out, deletes the inferior and its threads, and resets
   caches.  The statement order below is deliberate — see the inline
   comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3749 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer — the result is overwritten by the next call and must
   not be freed.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3761
/* to_pid_to_str implementation for the dummy target: fall back to the
   plain "process N" rendering.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3767
/* Error-catcher for target_find_memory_regions.  Always errors out;
   the return statement only placates the compiler.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3775
/* Error-catcher for target_make_corefile_notes.  Always errors out;
   the return statement only placates the compiler.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3783
/* Error-catcher for target_get_bookmark.  tcomplain does not return;
   the trailing return only placates the compiler.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}
3791
/* Error-catcher for target_goto_bookmark; always complains.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3798
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack;
   all remaining slots are filled in by install_dummy_methods.

   NOTE(review): the return_zero assignments below cast between
   function-pointer types with different signatures; calling through
   the casted pointer relies on the platform ABI tolerating extra,
   ignored arguments — confirm this matches the file's other stubs.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3831 \f
/* Debug wrapper for to_open: forward to the real target, then log the
   call.  NOTE(review): ARGS may be NULL for some targets — confirm
   fprintf_unfiltered tolerates a NULL "%s" argument.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3839
/* Close TARG, which must not currently be pushed on the target stack.
   Prefers the to_xclose hook over to_close when both exist.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3853
3854 void
3855 target_attach (char *args, int from_tty)
3856 {
3857 struct target_ops *t;
3858
3859 for (t = current_target.beneath; t != NULL; t = t->beneath)
3860 {
3861 if (t->to_attach != NULL)
3862 {
3863 t->to_attach (t, args, from_tty);
3864 if (targetdebug)
3865 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3866 args, from_tty);
3867 return;
3868 }
3869 }
3870
3871 internal_error (__FILE__, __LINE__,
3872 _("could not find a target to attach"));
3873 }
3874
3875 int
3876 target_thread_alive (ptid_t ptid)
3877 {
3878 struct target_ops *t;
3879
3880 for (t = current_target.beneath; t != NULL; t = t->beneath)
3881 {
3882 if (t->to_thread_alive != NULL)
3883 {
3884 int retval;
3885
3886 retval = t->to_thread_alive (t, ptid);
3887 if (targetdebug)
3888 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3889 ptid_get_pid (ptid), retval);
3890
3891 return retval;
3892 }
3893 }
3894
3895 return 0;
3896 }
3897
3898 void
3899 target_find_new_threads (void)
3900 {
3901 struct target_ops *t;
3902
3903 for (t = current_target.beneath; t != NULL; t = t->beneath)
3904 {
3905 if (t->to_find_new_threads != NULL)
3906 {
3907 t->to_find_new_threads (t);
3908 if (targetdebug)
3909 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3910
3911 return;
3912 }
3913 }
3914 }
3915
/* Stop the thread(s) named by PTID, honoring the global "may-stop"
   setting (warn and do nothing when stopping is disallowed).  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (ptid);
}
3927
/* Debug wrapper for to_post_attach: forward to the real target, then
   log the call.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3935
/* Concatenate ELEM to LIST, a comma separate list, and return the
   result.  The LIST incoming argument is released.  The returned
   string is always freshly allocated and owned by the caller.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}
3947
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3965
/* Render TARGET_OPTIONS (a TARGET_* flags mask) as a comma-separated
   string; unknown leftover bits are reported as "unknown???".  The
   result is xmalloc'd and owned by the caller.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Consume one named flag from TARGET_OPTIONS, appending its name.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3983
/* Log one register access for target debugging: FUNC, then the
   register's name (or number when unnamed/out of range), then, for
   valid raw registers, its bytes and — when it fits in a LONGEST —
   its numeric value.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Only registers narrow enough to decode are printed as
	 numbers.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
4020
4021 void
4022 target_fetch_registers (struct regcache *regcache, int regno)
4023 {
4024 struct target_ops *t;
4025
4026 for (t = current_target.beneath; t != NULL; t = t->beneath)
4027 {
4028 if (t->to_fetch_registers != NULL)
4029 {
4030 t->to_fetch_registers (t, regcache, regno);
4031 if (targetdebug)
4032 debug_print_register ("target_fetch_registers", regcache, regno);
4033 return;
4034 }
4035 }
4036 }
4037
/* Write register REGNO from REGCACHE back to the target, honoring the
   global "may-write-registers" setting.  Dispatches through the
   (delegated) current_target method rather than walking the stack.  */

void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
4052
4053 int
4054 target_core_of_thread (ptid_t ptid)
4055 {
4056 struct target_ops *t;
4057
4058 for (t = current_target.beneath; t != NULL; t = t->beneath)
4059 {
4060 if (t->to_core_of_thread != NULL)
4061 {
4062 int retval = t->to_core_of_thread (t, ptid);
4063
4064 if (targetdebug)
4065 fprintf_unfiltered (gdb_stdlog,
4066 "target_core_of_thread (%d) = %d\n",
4067 ptid_get_pid (ptid), retval);
4068 return retval;
4069 }
4070 }
4071
4072 return -1;
4073 }
4074
/* Compare SIZE bytes of target memory at MEMADDR against DATA via the
   first capable target; complains (throws) when no target implements
   the comparison.  tcomplain is noreturn, so control never falls off
   the end.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  tcomplain ();
}
4098
/* The documentation for this function is in its prototype declaration in
   target.h.  Returns 1 (failure) when no target on the stack supports
   masked watchpoints.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4125
/* The documentation for this function is in its prototype declaration in
   target.h.  Returns 1 (failure) when no target on the stack supports
   masked watchpoints.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4152
4153 /* The documentation for this function is in its prototype declaration
4154 in target.h. */
4155
4156 int
4157 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4158 {
4159 struct target_ops *t;
4160
4161 for (t = current_target.beneath; t != NULL; t = t->beneath)
4162 if (t->to_masked_watch_num_registers != NULL)
4163 return t->to_masked_watch_num_registers (t, addr, mask);
4164
4165 return -1;
4166 }
4167
4168 /* The documentation for this function is in its prototype declaration
4169 in target.h. */
4170
4171 int
4172 target_ranged_break_num_registers (void)
4173 {
4174 struct target_ops *t;
4175
4176 for (t = current_target.beneath; t != NULL; t = t->beneath)
4177 if (t->to_ranged_break_num_registers != NULL)
4178 return t->to_ranged_break_num_registers (t);
4179
4180 return -1;
4181 }
4182
4183 /* See target.h. */
4184
4185 struct btrace_target_info *
4186 target_enable_btrace (ptid_t ptid)
4187 {
4188 struct target_ops *t;
4189
4190 for (t = current_target.beneath; t != NULL; t = t->beneath)
4191 if (t->to_enable_btrace != NULL)
4192 return t->to_enable_btrace (ptid);
4193
4194 tcomplain ();
4195 return NULL;
4196 }
4197
/* See target.h.  Disable the branch trace described by BTINFO;
   complains (throws) when no target implements it.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (btinfo);
	return;
      }

  tcomplain ();
}
4214
/* See target.h.  Tear down the branch trace described by BTINFO;
   complains (throws) when no target implements it.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (btinfo);
	return;
      }

  tcomplain ();
}
4231
/* See target.h.  Read branch trace data into *BTRACE; complains
   (throws) when no target implements it, so the trailing return only
   placates the compiler.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4248
/* See target.h.  Stop recording; quietly does nothing when no target
   implements it (recording is optional).  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording ();
	return;
      }

  /* This is optional.  */
}
4265
4266 /* See target.h. */
4267
4268 void
4269 target_info_record (void)
4270 {
4271 struct target_ops *t;
4272
4273 for (t = current_target.beneath; t != NULL; t = t->beneath)
4274 if (t->to_info_record != NULL)
4275 {
4276 t->to_info_record ();
4277 return;
4278 }
4279
4280 tcomplain ();
4281 }
4282
4283 /* See target.h. */
4284
4285 void
4286 target_save_record (const char *filename)
4287 {
4288 struct target_ops *t;
4289
4290 for (t = current_target.beneath; t != NULL; t = t->beneath)
4291 if (t->to_save_record != NULL)
4292 {
4293 t->to_save_record (filename);
4294 return;
4295 }
4296
4297 tcomplain ();
4298 }
4299
4300 /* See target.h. */
4301
4302 int
4303 target_supports_delete_record (void)
4304 {
4305 struct target_ops *t;
4306
4307 for (t = current_target.beneath; t != NULL; t = t->beneath)
4308 if (t->to_delete_record != NULL)
4309 return 1;
4310
4311 return 0;
4312 }
4313
/* See target.h.  Delete the recorded execution trace; complains
   (throws) when no target implements it.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record ();
	return;
      }

  tcomplain ();
}
4330
4331 /* See target.h. */
4332
4333 int
4334 target_record_is_replaying (void)
4335 {
4336 struct target_ops *t;
4337
4338 for (t = current_target.beneath; t != NULL; t = t->beneath)
4339 if (t->to_record_is_replaying != NULL)
4340 return t->to_record_is_replaying ();
4341
4342 return 0;
4343 }
4344
/* See target.h.  Go to the beginning of the recorded execution trace;
   complains (throws) when no target implements it.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin ();
	return;
      }

  tcomplain ();
}
4361
/* See target.h.  Go to the end of the recorded execution trace;
   complains (throws) when no target implements it.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end ();
	return;
      }

  tcomplain ();
}
4378
4379 /* See target.h. */
4380
4381 void
4382 target_goto_record (ULONGEST insn)
4383 {
4384 struct target_ops *t;
4385
4386 for (t = current_target.beneath; t != NULL; t = t->beneath)
4387 if (t->to_goto_record != NULL)
4388 {
4389 t->to_goto_record (insn);
4390 return;
4391 }
4392
4393 tcomplain ();
4394 }
4395
/* See target.h.  Print SIZE instructions of history with FLAGS;
   complains (throws) when no target implements it.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (size, flags);
	return;
      }

  tcomplain ();
}
4412
4413 /* See target.h. */
4414
4415 void
4416 target_insn_history_from (ULONGEST from, int size, int flags)
4417 {
4418 struct target_ops *t;
4419
4420 for (t = current_target.beneath; t != NULL; t = t->beneath)
4421 if (t->to_insn_history_from != NULL)
4422 {
4423 t->to_insn_history_from (from, size, flags);
4424 return;
4425 }
4426
4427 tcomplain ();
4428 }
4429
/* See target.h.  Print the instruction history between BEGIN and END,
   with FLAGS; complains (throws) when no target implements it.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (begin, end, flags);
	return;
      }

  tcomplain ();
}
4446
4447 /* See target.h. */
4448
4449 void
4450 target_call_history (int size, int flags)
4451 {
4452 struct target_ops *t;
4453
4454 for (t = current_target.beneath; t != NULL; t = t->beneath)
4455 if (t->to_call_history != NULL)
4456 {
4457 t->to_call_history (size, flags);
4458 return;
4459 }
4460
4461 tcomplain ();
4462 }
4463
/* See target.h.  Print SIZE entries of call history starting at BEGIN,
   with FLAGS; complains (throws) when no target implements it.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (begin, size, flags);
	return;
      }

  tcomplain ();
}
4480
4481 /* See target.h. */
4482
4483 void
4484 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4485 {
4486 struct target_ops *t;
4487
4488 for (t = current_target.beneath; t != NULL; t = t->beneath)
4489 if (t->to_call_history_range != NULL)
4490 {
4491 t->to_call_history_range (begin, end, flags);
4492 return;
4493 }
4494
4495 tcomplain ();
4496 }
4497
/* Debug wrapper for to_prepare_to_store: forward to the real target,
   then log the call.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4505
/* See target.h.  Return the first non-NULL to_get_unwinder on the
   stack, or NULL.  Note: to_get_unwinder is read as a data member
   here, not invoked.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}
4519
4520 /* See target.h. */
4521
4522 const struct frame_unwind *
4523 target_get_tailcall_unwinder (void)
4524 {
4525 struct target_ops *t;
4526
4527 for (t = current_target.beneath; t != NULL; t = t->beneath)
4528 if (t->to_get_tailcall_unwinder != NULL)
4529 return t->to_get_tailcall_unwinder;
4530
4531 return NULL;
4532 }
4533
/* See target.h.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  /* Ask the first target, starting at OPS, that implements the
     method; fall back to the architecture's default otherwise.  */
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}
4546
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Convenience wrapper: start the search at the top of the current
     target stack.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4554
/* Debug wrapper for the deprecated memory-transfer method: perform the
   transfer via the real target and log the address, length, direction
   and (subject to the debug level) the transferred bytes.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line whenever the buffer address reaches a
	     16-byte boundary.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Unless verbose debugging (level >= 2) was requested,
		 elide everything after the first output line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4595
/* Debug wrapper for to_files_info: forward to the real target and log
   the call.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4603
4604 static int
4605 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4606 struct bp_target_info *bp_tgt)
4607 {
4608 int retval;
4609
4610 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4611
4612 fprintf_unfiltered (gdb_stdlog,
4613 "target_insert_breakpoint (%s, xxx) = %ld\n",
4614 core_addr_to_string (bp_tgt->placed_address),
4615 (unsigned long) retval);
4616 return retval;
4617 }
4618
4619 static int
4620 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4621 struct bp_target_info *bp_tgt)
4622 {
4623 int retval;
4624
4625 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4626
4627 fprintf_unfiltered (gdb_stdlog,
4628 "target_remove_breakpoint (%s, xxx) = %ld\n",
4629 core_addr_to_string (bp_tgt->placed_address),
4630 (unsigned long) retval);
4631 return retval;
4632 }
4633
4634 static int
4635 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4636 int type, int cnt, int from_tty)
4637 {
4638 int retval;
4639
4640 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4641 type, cnt, from_tty);
4642
4643 fprintf_unfiltered (gdb_stdlog,
4644 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4645 (unsigned long) type,
4646 (unsigned long) cnt,
4647 (unsigned long) from_tty,
4648 (unsigned long) retval);
4649 return retval;
4650 }
4651
4652 static int
4653 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4654 CORE_ADDR addr, int len)
4655 {
4656 CORE_ADDR retval;
4657
4658 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4659 addr, len);
4660
4661 fprintf_unfiltered (gdb_stdlog,
4662 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4663 core_addr_to_string (addr), (unsigned long) len,
4664 core_addr_to_string (retval));
4665 return retval;
4666 }
4667
/* Debug wrapper for to_can_accel_watchpoint_condition: forward to the
   real target and log the query and its result.

   NOTE(review): unlike the neighbouring wrappers, this method has not
   yet been converted to take a struct target_ops argument — confirm
   against the to_can_accel_watchpoint_condition field in target.h.  */

static int
debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4684
4685 static int
4686 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4687 {
4688 int retval;
4689
4690 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4691
4692 fprintf_unfiltered (gdb_stdlog,
4693 "target_stopped_by_watchpoint () = %ld\n",
4694 (unsigned long) retval);
4695 return retval;
4696 }
4697
/* Debug wrapper for to_stopped_data_address: forward to the real
   target and log the result.  On success *ADDR holds the data address
   that triggered the watchpoint.

   NOTE(review): this wrapper passes TARGET through as the self
   argument, whereas most siblings pass &debug_target — confirm this
   difference is intentional.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4711
4712 static int
4713 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4714 CORE_ADDR addr,
4715 CORE_ADDR start, int length)
4716 {
4717 int retval;
4718
4719 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4720 start, length);
4721
4722 fprintf_filtered (gdb_stdlog,
4723 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4724 core_addr_to_string (addr), core_addr_to_string (start),
4725 length, retval);
4726 return retval;
4727 }
4728
4729 static int
4730 debug_to_insert_hw_breakpoint (struct target_ops *self,
4731 struct gdbarch *gdbarch,
4732 struct bp_target_info *bp_tgt)
4733 {
4734 int retval;
4735
4736 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4737 gdbarch, bp_tgt);
4738
4739 fprintf_unfiltered (gdb_stdlog,
4740 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4741 core_addr_to_string (bp_tgt->placed_address),
4742 (unsigned long) retval);
4743 return retval;
4744 }
4745
4746 static int
4747 debug_to_remove_hw_breakpoint (struct target_ops *self,
4748 struct gdbarch *gdbarch,
4749 struct bp_target_info *bp_tgt)
4750 {
4751 int retval;
4752
4753 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4754 gdbarch, bp_tgt);
4755
4756 fprintf_unfiltered (gdb_stdlog,
4757 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4758 core_addr_to_string (bp_tgt->placed_address),
4759 (unsigned long) retval);
4760 return retval;
4761 }
4762
4763 static int
4764 debug_to_insert_watchpoint (struct target_ops *self,
4765 CORE_ADDR addr, int len, int type,
4766 struct expression *cond)
4767 {
4768 int retval;
4769
4770 retval = debug_target.to_insert_watchpoint (&debug_target,
4771 addr, len, type, cond);
4772
4773 fprintf_unfiltered (gdb_stdlog,
4774 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4775 core_addr_to_string (addr), len, type,
4776 host_address_to_string (cond), (unsigned long) retval);
4777 return retval;
4778 }
4779
4780 static int
4781 debug_to_remove_watchpoint (struct target_ops *self,
4782 CORE_ADDR addr, int len, int type,
4783 struct expression *cond)
4784 {
4785 int retval;
4786
4787 retval = debug_target.to_remove_watchpoint (&debug_target,
4788 addr, len, type, cond);
4789
4790 fprintf_unfiltered (gdb_stdlog,
4791 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4792 core_addr_to_string (addr), len, type,
4793 host_address_to_string (cond), (unsigned long) retval);
4794 return retval;
4795 }
4796
/* Debug wrapper for to_terminal_init: forward to the real target and
   log the call.  */

static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4804
/* Debug wrapper for to_terminal_inferior: forward to the real target
   and log the call.  */

static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4812
/* Debug wrapper for to_terminal_ours_for_output: forward to the real
   target and log the call.  */

static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4820
/* Debug wrapper for to_terminal_ours: forward to the real target and
   log the call.  */

static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4828
/* Debug wrapper for to_terminal_save_ours: forward to the real target
   and log the call.  */

static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4836
/* Debug wrapper for to_terminal_info: forward to the real target and
   log the call.

   NOTE(review): ARG is printed with %s; presumably callers never pass
   NULL here — confirm against the call sites of target_terminal_info.  */

static void
debug_to_terminal_info (const char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4845
/* Debug wrapper for to_load: forward to the real target and log the
   call.  */

static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4853
/* Debug wrapper for to_post_startup_inferior: forward to the real
   target and log the call with the inferior's pid.  */

static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4862
4863 static int
4864 debug_to_insert_fork_catchpoint (int pid)
4865 {
4866 int retval;
4867
4868 retval = debug_target.to_insert_fork_catchpoint (pid);
4869
4870 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4871 pid, retval);
4872
4873 return retval;
4874 }
4875
4876 static int
4877 debug_to_remove_fork_catchpoint (int pid)
4878 {
4879 int retval;
4880
4881 retval = debug_target.to_remove_fork_catchpoint (pid);
4882
4883 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4884 pid, retval);
4885
4886 return retval;
4887 }
4888
4889 static int
4890 debug_to_insert_vfork_catchpoint (int pid)
4891 {
4892 int retval;
4893
4894 retval = debug_target.to_insert_vfork_catchpoint (pid);
4895
4896 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4897 pid, retval);
4898
4899 return retval;
4900 }
4901
4902 static int
4903 debug_to_remove_vfork_catchpoint (int pid)
4904 {
4905 int retval;
4906
4907 retval = debug_target.to_remove_vfork_catchpoint (pid);
4908
4909 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4910 pid, retval);
4911
4912 return retval;
4913 }
4914
4915 static int
4916 debug_to_insert_exec_catchpoint (int pid)
4917 {
4918 int retval;
4919
4920 retval = debug_target.to_insert_exec_catchpoint (pid);
4921
4922 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4923 pid, retval);
4924
4925 return retval;
4926 }
4927
4928 static int
4929 debug_to_remove_exec_catchpoint (int pid)
4930 {
4931 int retval;
4932
4933 retval = debug_target.to_remove_exec_catchpoint (pid);
4934
4935 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4936 pid, retval);
4937
4938 return retval;
4939 }
4940
4941 static int
4942 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4943 {
4944 int has_exited;
4945
4946 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4947
4948 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4949 pid, wait_status, *exit_status, has_exited);
4950
4951 return has_exited;
4952 }
4953
4954 static int
4955 debug_to_can_run (void)
4956 {
4957 int retval;
4958
4959 retval = debug_target.to_can_run ();
4960
4961 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4962
4963 return retval;
4964 }
4965
/* Debug wrapper for to_thread_architecture: forward to the real target
   and log the result.

   NOTE(review): the result is fed straight to gdbarch_bfd_arch_info,
   so this assumes the method never returns NULL — confirm.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4980
/* Debug wrapper for to_stop: forward to the real target and log the
   call.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4989
/* Debug wrapper for to_rcmd: forward the monitor command to the real
   target and log it.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4997
4998 static char *
4999 debug_to_pid_to_exec_file (int pid)
5000 {
5001 char *exec_file;
5002
5003 exec_file = debug_target.to_pid_to_exec_file (pid);
5004
5005 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
5006 pid, exec_file);
5007
5008 return exec_file;
5009 }
5010
/* Install the debug_to_* logging wrappers: save the current (real)
   target vector in DEBUG_TARGET, then repoint CURRENT_TARGET's methods
   at the wrappers above, which forward to the saved copy and log each
   call to gdb_stdlog.  Methods not assigned here are either already
   converted to self-logging delegates or are not traced.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5058 \f
5059
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5064
/* Implement the "monitor" command: pass CMD to the target's to_rcmd
   method.  Error out if no target on the stack implements to_rcmd,
   i.e. the method is still the tcomplain default — either directly,
   or hidden behind the debug wrapper.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
5077
5078 /* Print the name of each layers of our target stack. */
5079
5080 static void
5081 maintenance_print_target_stack (char *cmd, int from_tty)
5082 {
5083 struct target_ops *t;
5084
5085 printf_filtered (_("The current target stack is:\n"));
5086
5087 for (t = target_stack; t != NULL; t = t->beneath)
5088 {
5089 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5090 }
5091 }
5092
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (see
   set_target_async_command, which reverts this copy and errors).  */
static int target_async_permitted_1 = 0;
5099
/* Handler for "set target-async".  Refuse the change while there is a
   live inferior; otherwise commit the user-set value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Revert the user-visible copy so "show target-async" keeps
	 reporting the value actually in effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5112
/* Handler for "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5122
/* Temporary copies of permission settings.  The "set may-*" commands
   write these; the real may_* flags are updated from them only when
   it is safe (see set_target_permissions and
   set_write_memory_permission).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5131
/* Make the user-set values match the real values again.  Used to back
   out a user's edit when the change cannot be applied.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5144
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user-set copies and refuse while running.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately not handled here; it has its own
     handler, set_write_memory_permission, below.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5166
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, there is no check for a running inferior
   here.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5177
5178
/* Register the target-related commands and "set/show" knobs, and push
   the dummy target that sits at the bottom of the target stack.
   Called once at GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target remains at the bottom of the stack forever.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission knobs below write to the *_1 shadow copies;
     the handlers commit them to the real flags when it is safe.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.151402 seconds and 4 git commands to generate.