convert to_remove_fork_catchpoint
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 static void *return_null (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int find_default_can_async_p (struct target_ops *ignore);
84
85 static int find_default_is_async_p (struct target_ops *ignore);
86
87 #include "target-delegates.c"
88
89 static void init_dummy_target (void);
90
91 static struct target_ops debug_target;
92
93 static void debug_to_open (char *, int);
94
95 static void debug_to_prepare_to_store (struct target_ops *self,
96 struct regcache *);
97
98 static void debug_to_files_info (struct target_ops *);
99
100 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
107 int, int, int);
108
109 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
110 struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
114 struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (struct target_ops *self,
118 CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (struct target_ops *self,
122 CORE_ADDR, int, int,
123 struct expression *);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
131 CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
134 CORE_ADDR, int, int,
135 struct expression *);
136
137 static void debug_to_terminal_init (struct target_ops *self);
138
139 static void debug_to_terminal_inferior (struct target_ops *self);
140
141 static void debug_to_terminal_ours_for_output (struct target_ops *self);
142
143 static void debug_to_terminal_save_ours (struct target_ops *self);
144
145 static void debug_to_terminal_ours (struct target_ops *self);
146
147 static void debug_to_load (struct target_ops *self, char *, int);
148
149 static int debug_to_can_run (struct target_ops *self);
150
151 static void debug_to_stop (struct target_ops *self, ptid_t);
152
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial number of slots in TARGET_STRUCTS; the vector is doubled
   whenever it fills up (see add_target_with_completer).  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Callback for "show debug target"; VALUE is the current setting
   rendered as a string by the command machinery.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
214
215 static void setup_target_debug (void);
216
/* The user just typed 'target' without the name of a target.  Print a
   usage hint instead of doing anything.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
225
226 /* Default target_has_* methods for process_stratum targets. */
227
228 int
229 default_child_has_all_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_stack (struct target_ops *ops)
250 {
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_registers (struct target_ops *ops)
260 {
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
270 {
271 /* If there's no thread selected, then we can't make it run through
272 hoops. */
273 if (ptid_equal (the_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279
280 int
281 target_has_all_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_all_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_memory_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_memory (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_stack_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_stack (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_registers_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_registers (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_1 (ptid_t the_ptid)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_execution (t, the_ptid))
335 return 1;
336
337 return 0;
338 }
339
/* Return nonzero if the currently selected inferior (INFERIOR_PTID)
   has execution.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
345
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* hooks default to "no", i.e. return_zero, cast to the
     expected signature.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill any remaining unset methods with delegators (generated in
     target-delegates.c) that forward to the target beneath.  */
  install_delegators (t);
}
373
374 /* Add possible target architecture T to the list and add a new
375 command 'target T->to_shortname'. Set COMPLETER as the command's
376 completer if not NULL. */
377
378 void
379 add_target_with_completer (struct target_ops *t,
380 completer_ftype *completer)
381 {
382 struct cmd_list_element *c;
383
384 complete_target_initialization (t);
385
386 if (!target_structs)
387 {
388 target_struct_allocsize = DEFAULT_ALLOCSIZE;
389 target_structs = (struct target_ops **) xmalloc
390 (target_struct_allocsize * sizeof (*target_structs));
391 }
392 if (target_struct_size >= target_struct_allocsize)
393 {
394 target_struct_allocsize *= 2;
395 target_structs = (struct target_ops **)
396 xrealloc ((char *) target_structs,
397 target_struct_allocsize * sizeof (*target_structs));
398 }
399 target_structs[target_struct_size++] = t;
400
401 if (targetlist == NULL)
402 add_prefix_cmd ("target", class_run, target_command, _("\
403 Connect to a target machine or process.\n\
404 The first argument is the type or protocol of the target machine.\n\
405 Remaining arguments are interpreted by the target protocol. For more\n\
406 information on the arguments for a particular protocol, type\n\
407 `help target ' followed by the protocol name."),
408 &targetlist, "target ", 0, &cmdlist);
409 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
410 &targetlist);
411 if (completer != NULL)
412 set_cmd_completer (c, completer);
413 }
414
/* Add a possible target architecture to the list, with no command
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
422
423 /* See target.h. */
424
425 void
426 add_deprecated_target_alias (struct target_ops *t, char *alias)
427 {
428 struct cmd_list_element *c;
429 char *alt;
430
431 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
432 see PR cli/15104. */
433 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
434 alt = xstrprintf ("target %s", t->to_shortname);
435 deprecate_cmd (c, alt);
436 }
437
/* Stub functions */

/* A do-nothing function, usable wherever a target method may safely
   be a no-op.  */

void
target_ignore (void)
{
}
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
/* Load a program into the inferior, delegating to the current
   target's to_load method.  */

void
target_load (char *arg, int from_tty)
{
  /* A load can change memory contents, so drop any cached data
     before delegating.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
493 void
494 target_terminal_inferior (void)
495 {
496 /* A background resume (``run&'') should leave GDB in control of the
497 terminal. Use target_can_async_p, not target_is_async_p, since at
498 this point the target is not async yet. However, if sync_execution
499 is not set, we know it will become async prior to resume. */
500 if (target_can_async_p () && !sync_execution)
501 return;
502
503 /* If GDB is resuming the inferior in the foreground, install
504 inferior's terminal modes. */
505 (*current_target.to_terminal_inferior) (&current_target);
506 }
507
/* Default memory-transfer callback that always fails: sets errno to
   EIO and reports zero bytes transferred.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
515
/* Default method for operations the current target does not support;
   throws an error naming the target.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
522
/* Throw an error complaining that there is no process to debug.
   Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
528
/* Default to_terminal_info method: report that no terminal state has
   been saved.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
534
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
546
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops *self)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy each still-unset FIELD from the topmost stack entry that
     provides it.  Fields marked "Do not inherit" below are covered by
     the delegators installed above, or defaulted further down.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
898
899 /* Push a new target type into the stack of the existing target accessors,
900 possibly superseding some of the existing accessors.
901
902 Rather than allow an empty stack, we always have the dummy target at
903 the bottom stratum, so we can call the function vectors without
904 checking them. */
905
906 void
907 push_target (struct target_ops *t)
908 {
909 struct target_ops **cur;
910
911 /* Check magic number. If wrong, it probably means someone changed
912 the struct definition, but not all the places that initialize one. */
913 if (t->to_magic != OPS_MAGIC)
914 {
915 fprintf_unfiltered (gdb_stderr,
916 "Magic number of %s target struct wrong\n",
917 t->to_shortname);
918 internal_error (__FILE__, __LINE__,
919 _("failed internal consistency check"));
920 }
921
922 /* Find the proper stratum to install this target in. */
923 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
924 {
925 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
926 break;
927 }
928
929 /* If there's already targets at this stratum, remove them. */
930 /* FIXME: cagney/2003-10-15: I think this should be popping all
931 targets to CUR, and not just those at this stratum level. */
932 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
933 {
934 /* There's already something at this stratum level. Close it,
935 and un-hook it from the stack. */
936 struct target_ops *tmp = (*cur);
937
938 (*cur) = (*cur)->beneath;
939 tmp->beneath = NULL;
940 target_close (tmp);
941 }
942
943 /* We have removed all targets in our stratum, now add the new one. */
944 t->beneath = (*cur);
945 (*cur) = t;
946
947 update_current_target ();
948 }
949
950 /* Remove a target_ops vector from the stack, wherever it may be.
951 Return how many times it was removed (0 or 1). */
952
953 int
954 unpush_target (struct target_ops *t)
955 {
956 struct target_ops **cur;
957 struct target_ops *tmp;
958
959 if (t->to_stratum == dummy_stratum)
960 internal_error (__FILE__, __LINE__,
961 _("Attempt to unpush the dummy target"));
962
963 /* Look for the specified target. Note that we assume that a target
964 can only occur once in the target stack. */
965
966 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
967 {
968 if ((*cur) == t)
969 break;
970 }
971
972 /* If we don't find target_ops, quit. Only open targets should be
973 closed. */
974 if ((*cur) == NULL)
975 return 0;
976
977 /* Unchain the target. */
978 tmp = (*cur);
979 (*cur) = (*cur)->beneath;
980 tmp->beneath = NULL;
981
982 update_current_target ();
983
984 /* Finally close the target. Note we do this after unchaining, so
985 any target method calls from within the target_close
986 implementation don't end up in T anymore. */
987 target_close (t);
988
989 return 1;
990 }
991
/* Unpush every target whose stratum is strictly above ABOVE_STRATUM.
   It is an internal error if the topmost target cannot be unpushed.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1008
/* Unpush every target above the bottom (dummy) stratum.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1014
1015 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1016
1017 int
1018 target_is_pushed (struct target_ops *t)
1019 {
1020 struct target_ops **cur;
1021
1022 /* Check magic number. If wrong, it probably means someone changed
1023 the struct definition, but not all the places that initialize one. */
1024 if (t->to_magic != OPS_MAGIC)
1025 {
1026 fprintf_unfiltered (gdb_stderr,
1027 "Magic number of %s target struct wrong\n",
1028 t->to_shortname);
1029 internal_error (__FILE__, __LINE__,
1030 _("failed internal consistency check"));
1031 }
1032
1033 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1034 if (*cur == t)
1035 return 1;
1036
1037 return 0;
1038 }
1039
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Requires a target on the stack that implements
   to_get_thread_local_address and a gdbarch that provides
   fetch_tls_load_module_address; otherwise errors out.  TLS-specific
   exceptions raised during the lookup are translated here into
   user-facing error messages; any other exception is re-thrown.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Assigned inside TRY_CATCH, so must be volatile to survive the
     setjmp/longjmp-based exception machinery.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath the flattened current_target that
     can translate TLS addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* Pick the shared-library vs. executable wording below.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific failure; propagate unchanged.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1136
1137 const char *
1138 target_xfer_status_to_string (enum target_xfer_status err)
1139 {
1140 #define CASE(X) case X: return #X
1141 switch (err)
1142 {
1143 CASE(TARGET_XFER_E_IO);
1144 CASE(TARGET_XFER_E_UNAVAILABLE);
1145 default:
1146 return "<unknown>";
1147 }
1148 #undef CASE
1149 };
1150
1151
1152 #undef MIN
1153 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1154
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  *STRING is set (and owned by the caller) even on error, holding
   whatever bytes were read before the failure.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* One aligned 4-byte word from the target.  */
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;		/* Current capacity of BUFFER.  */
  char *bufptr;			/* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read whole aligned words, so each target_read_memory request
	 stays within a single 4-byte-aligned unit.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Double the result buffer if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at (and including) the
	 terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Hand BUFFER to the caller unconditionally; see the contract above.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1227
1228 struct target_section_table *
1229 target_get_section_table (struct target_ops *target)
1230 {
1231 struct target_ops *t;
1232
1233 if (targetdebug)
1234 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1235
1236 for (t = target; t != NULL; t = t->beneath)
1237 if (t->to_get_section_table != NULL)
1238 return (*t->to_get_section_table) (t);
1239
1240 return NULL;
1241 }
1242
1243 /* Find a section containing ADDR. */
1244
1245 struct target_section *
1246 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1247 {
1248 struct target_section_table *table = target_get_section_table (target);
1249 struct target_section *secp;
1250
1251 if (table == NULL)
1252 return NULL;
1253
1254 for (secp = table->sections; secp < table->sections_end; secp++)
1255 {
1256 if (addr >= secp->addr && addr < secp->endaddr)
1257 return secp;
1258 }
1259 return NULL;
1260 }
1261
1262 /* Read memory from the live target, even if currently inspecting a
1263 traceframe. The return is the same as that of target_read. */
1264
1265 static enum target_xfer_status
1266 target_read_live_memory (enum target_object object,
1267 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1268 ULONGEST *xfered_len)
1269 {
1270 enum target_xfer_status ret;
1271 struct cleanup *cleanup;
1272
1273 /* Switch momentarily out of tfind mode so to access live memory.
1274 Note that this must not clear global state, such as the frame
1275 cache, which must still remain valid for the previous traceframe.
1276 We may be _building_ the frame cache at this point. */
1277 cleanup = make_cleanup_restore_traceframe_number ();
1278 set_traceframe_number (-1);
1279
1280 ret = target_xfer_partial (current_target.beneath, object, NULL,
1281 myaddr, NULL, memaddr, len, xfered_len);
1282
1283 do_cleanups (cleanup);
1284 return ret;
1285 }
1286
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed when MEMADDR falls inside a section that BFD flags
     as read-only; otherwise report end-of-file below.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.
		     The caller is expected to retry for the rest.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR is not in any read-only section we know of.  */
  return TARGET_XFER_EOF;
}
1342
1343 /* Read memory from more than one valid target. A core file, for
1344 instance, could have some of memory but delegate other bits to
1345 the target below it. So, we must manually try all targets. */
1346
1347 static enum target_xfer_status
1348 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1349 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1350 ULONGEST *xfered_len)
1351 {
1352 enum target_xfer_status res;
1353
1354 do
1355 {
1356 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1357 readbuf, writebuf, memaddr, len,
1358 xfered_len);
1359 if (res == TARGET_XFER_OK)
1360 break;
1361
1362 /* Stop if the target reports that the memory is not available. */
1363 if (res == TARGET_XFER_E_UNAVAILABLE)
1364 break;
1365
1366 /* We want to continue past core files to executables, but not
1367 past a running target's memory. */
1368 if (ops->to_has_all_memory (ops))
1369 break;
1370
1371 ops = ops->beneath;
1372 }
1373 while (ops != NULL);
1374
1375 return res;
1376 }
1377
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections; trusted read-only
   executable sections; traceframe-available memory (with read-only
   live fallback); the dcache; and finally a raw transfer through the
   target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;			/* LEN clipped to the containing mem region.  */
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip to just before the first available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1588
1589 /* Perform a partial memory transfer. For docs see target.h,
1590 to_xfer_partial. */
1591
1592 static enum target_xfer_status
1593 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1594 gdb_byte *readbuf, const gdb_byte *writebuf,
1595 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1596 {
1597 enum target_xfer_status res;
1598
1599 /* Zero length requests are ok and require no work. */
1600 if (len == 0)
1601 return TARGET_XFER_EOF;
1602
1603 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1604 breakpoint insns, thus hiding out from higher layers whether
1605 there are software breakpoints inserted in the code stream. */
1606 if (readbuf != NULL)
1607 {
1608 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1609 xfered_len);
1610
1611 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1612 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1613 }
1614 else
1615 {
1616 void *buf;
1617 struct cleanup *old_chain;
1618
1619 /* A large write request is likely to be partially satisfied
1620 by memory_xfer_partial_1. We will continually malloc
1621 and free a copy of the entire write request for breakpoint
1622 shadow handling even though we only end up writing a small
1623 subset of it. Cap writes to 4KB to mitigate this. */
1624 len = min (4096, len);
1625
1626 buf = xmalloc (len);
1627 old_chain = make_cleanup (xfree, buf);
1628 memcpy (buf, writebuf, len);
1629
1630 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1631 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1632 xfered_len);
1633
1634 do_cleanups (old_chain);
1635 }
1636
1637 return res;
1638 }
1639
/* Cleanup callback: restore show_memory_breakpoints to the value
   stashed (as a uintptr_t cast to void *) in ARG by
   make_show_memory_breakpoints_cleanup.  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1645
1646 struct cleanup *
1647 make_show_memory_breakpoints_cleanup (int show)
1648 {
1649 int current = show_memory_breakpoints;
1650
1651 show_memory_breakpoints = show;
1652 return make_cleanup (restore_show_memory_breakpoints,
1653 (void *) (uintptr_t) current);
1654 }
1655
/* For docs see target.h, to_xfer_partial.

   Central dispatch for partial transfers: memory-flavored objects go
   through the memory-specific paths; everything else goes straight to
   OPS's own to_xfer_partial.  Also emits the "set debug target"
   trace, and asserts the *XFERED_LEN contract.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, breaking the line at each
	     16-byte-aligned host address; unless "set debug target"
	     is 2 or higher, stop after the first line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1748
1749 /* Read LEN bytes of target memory at address MEMADDR, placing the
1750 results in GDB's memory at MYADDR. Returns either 0 for success or
1751 TARGET_XFER_E_IO if any error occurs.
1752
1753 If an error occurs, no guarantee is made about the contents of the data at
1754 MYADDR. In particular, the caller should not depend upon partial reads
1755 filling the buffer with good data. There is no way for the caller to know
1756 how much good data might have been transfered anyway. Callers that can
1757 deal with partial reads should call target_read (which will retry until
1758 it makes no progress, and then return how much was transferred). */
1759
1760 int
1761 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1762 {
1763 /* Dispatch to the topmost target, not the flattened current_target.
1764 Memory accesses check target->to_has_(all_)memory, and the
1765 flattened target doesn't inherit those. */
1766 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1767 myaddr, memaddr, len) == len)
1768 return 0;
1769 else
1770 return TARGET_XFER_E_IO;
1771 }
1772
1773 /* Like target_read_memory, but specify explicitly that this is a read
1774 from the target's raw memory. That is, this read bypasses the
1775 dcache, breakpoint shadowing, etc. */
1776
1777 int
1778 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1779 {
1780 /* See comment in target_read_memory about why the request starts at
1781 current_target.beneath. */
1782 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1783 myaddr, memaddr, len) == len)
1784 return 0;
1785 else
1786 return TARGET_XFER_E_IO;
1787 }
1788
1789 /* Like target_read_memory, but specify explicitly that this is a read from
1790 the target's stack. This may trigger different cache behavior. */
1791
1792 int
1793 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1794 {
1795 /* See comment in target_read_memory about why the request starts at
1796 current_target.beneath. */
1797 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1798 myaddr, memaddr, len) == len)
1799 return 0;
1800 else
1801 return TARGET_XFER_E_IO;
1802 }
1803
1804 /* Like target_read_memory, but specify explicitly that this is a read from
1805 the target's code. This may trigger different cache behavior. */
1806
1807 int
1808 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1809 {
1810 /* See comment in target_read_memory about why the request starts at
1811 current_target.beneath. */
1812 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1813 myaddr, memaddr, len) == len)
1814 return 0;
1815 else
1816 return TARGET_XFER_E_IO;
1817 }
1818
1819 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1820 Returns either 0 for success or TARGET_XFER_E_IO if any
1821 error occurs. If an error occurs, no guarantee is made about how
1822 much data got written. Callers that can deal with partial writes
1823 should call target_write. */
1824
1825 int
1826 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1827 {
1828 /* See comment in target_read_memory about why the request starts at
1829 current_target.beneath. */
1830 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1831 myaddr, memaddr, len) == len)
1832 return 0;
1833 else
1834 return TARGET_XFER_E_IO;
1835 }
1836
1837 /* Write LEN bytes from MYADDR to target raw memory at address
1838 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1839 if any error occurs. If an error occurs, no guarantee is made
1840 about how much data got written. Callers that can deal with
1841 partial writes should call target_write. */
1842
1843 int
1844 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1845 {
1846 /* See comment in target_read_memory about why the request starts at
1847 current_target.beneath. */
1848 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1849 myaddr, memaddr, len) == len)
1850 return 0;
1851 else
1852 return TARGET_XFER_E_IO;
1853 }
1854
/* Fetch the target's memory map.

   Asks the first target on the stack that implements to_memory_map.
   Returns NULL if no target does, if the target returns no map, or
   if the returned regions overlap.  On success the regions come back
   sorted by address and numbered for the "mem" commands.  Caller
   owns the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target that can produce a memory map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort regions by address so the overlap check below is a simple
     adjacent-pair comparison.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1902
1903 void
1904 target_flash_erase (ULONGEST address, LONGEST length)
1905 {
1906 struct target_ops *t;
1907
1908 for (t = current_target.beneath; t != NULL; t = t->beneath)
1909 if (t->to_flash_erase != NULL)
1910 {
1911 if (targetdebug)
1912 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1913 hex_string (address), phex (length, 0));
1914 t->to_flash_erase (t, address, length);
1915 return;
1916 }
1917
1918 tcomplain ();
1919 }
1920
1921 void
1922 target_flash_done (void)
1923 {
1924 struct target_ops *t;
1925
1926 for (t = current_target.beneath; t != NULL; t = t->beneath)
1927 if (t->to_flash_done != NULL)
1928 {
1929 if (targetdebug)
1930 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1931 t->to_flash_done (t);
1932 return;
1933 }
1934
1935 tcomplain ();
1936 }
1937
/* "show trust-readonly-sections" callback: print the current VALUE
   of the setting to FILE.  C and FROM_TTY are unused here.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1946
1947 /* More generic transfers. */
1948
1949 static enum target_xfer_status
1950 default_xfer_partial (struct target_ops *ops, enum target_object object,
1951 const char *annex, gdb_byte *readbuf,
1952 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1953 ULONGEST *xfered_len)
1954 {
1955 if (object == TARGET_OBJECT_MEMORY
1956 && ops->deprecated_xfer_memory != NULL)
1957 /* If available, fall back to the target's
1958 "deprecated_xfer_memory" method. */
1959 {
1960 int xfered = -1;
1961
1962 errno = 0;
1963 if (writebuf != NULL)
1964 {
1965 void *buffer = xmalloc (len);
1966 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1967
1968 memcpy (buffer, writebuf, len);
1969 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1970 1/*write*/, NULL, ops);
1971 do_cleanups (cleanup);
1972 }
1973 if (readbuf != NULL)
1974 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1975 0/*read*/, NULL, ops);
1976 if (xfered > 0)
1977 {
1978 *xfered_len = (ULONGEST) xfered;
1979 return TARGET_XFER_E_IO;
1980 }
1981 else if (xfered == 0 && errno == 0)
1982 /* "deprecated_xfer_memory" uses 0, cross checked against
1983 ERRNO as one indication of an error. */
1984 return TARGET_XFER_EOF;
1985 else
1986 return TARGET_XFER_E_IO;
1987 }
1988 else
1989 {
1990 gdb_assert (ops->beneath != NULL);
1991 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1992 readbuf, writebuf, offset, len,
1993 xfered_len);
1994 }
1995 }
1996
1997 /* Target vector read/write partial wrapper functions. */
1998
1999 static enum target_xfer_status
2000 target_read_partial (struct target_ops *ops,
2001 enum target_object object,
2002 const char *annex, gdb_byte *buf,
2003 ULONGEST offset, ULONGEST len,
2004 ULONGEST *xfered_len)
2005 {
2006 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
2007 xfered_len);
2008 }
2009
2010 static enum target_xfer_status
2011 target_write_partial (struct target_ops *ops,
2012 enum target_object object,
2013 const char *annex, const gdb_byte *buf,
2014 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2015 {
2016 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2017 xfered_len);
2018 }
2019
2020 /* Wrappers to perform the full transfer. */
2021
2022 /* For docs on target_read see target.h. */
2023
2024 LONGEST
2025 target_read (struct target_ops *ops,
2026 enum target_object object,
2027 const char *annex, gdb_byte *buf,
2028 ULONGEST offset, LONGEST len)
2029 {
2030 LONGEST xfered = 0;
2031
2032 while (xfered < len)
2033 {
2034 ULONGEST xfered_len;
2035 enum target_xfer_status status;
2036
2037 status = target_read_partial (ops, object, annex,
2038 (gdb_byte *) buf + xfered,
2039 offset + xfered, len - xfered,
2040 &xfered_len);
2041
2042 /* Call an observer, notifying them of the xfer progress? */
2043 if (status == TARGET_XFER_EOF)
2044 return xfered;
2045 else if (status == TARGET_XFER_OK)
2046 {
2047 xfered += xfered_len;
2048 QUIT;
2049 }
2050 else
2051 return -1;
2052
2053 }
2054 return len;
2055 }
2056
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor at the end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */
2077
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* First byte readable: search forward for the end of the
	 readable prefix.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* Last byte readable: search backward for the start of the
	 readable suffix.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable -- give up (see the
	 function comment for why interior subranges are ignored).  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the part already
	 known readable.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF's
	 ownership transfers to the result block.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just the
	 readable tail into a right-sized buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2191
2192 void
2193 free_memory_read_result_vector (void *x)
2194 {
2195 VEC(memory_read_result_s) *v = x;
2196 memory_read_result_s *current;
2197 int ix;
2198
2199 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2200 {
2201 xfree (current->data);
2202 }
2203 VEC_free (memory_read_result_s, v);
2204 }
2205
/* Read LEN bytes of target memory at OFFSET, collecting the readable
   portions into a vector of memory_read_result_s blocks.  Regions
   marked unreadable in the memory map are skipped; a failed chunk
   read falls back to read_whatever_is_readable to salvage a
   subrange.  The caller owns the returned vector and its data
   buffers (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* NOTE(review): when REGION->hi is set, this computes the
	 distance from OFFSET rather than from the current address
	 (OFFSET + XFERED), so RLEN overestimates the room left in the
	 region once XFERED > 0; a chunk may then cross REGION->hi.
	 Confirm whether `region->hi - (offset + xfered)` is intended.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Chunk (or its prefix) read fine; hand BUFFER's
		 ownership to the result block.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2264
2265
2266 /* An alternative to target_write with progress callbacks. */
2267
2268 LONGEST
2269 target_write_with_progress (struct target_ops *ops,
2270 enum target_object object,
2271 const char *annex, const gdb_byte *buf,
2272 ULONGEST offset, LONGEST len,
2273 void (*progress) (ULONGEST, void *), void *baton)
2274 {
2275 LONGEST xfered = 0;
2276
2277 /* Give the progress callback a chance to set up. */
2278 if (progress)
2279 (*progress) (0, baton);
2280
2281 while (xfered < len)
2282 {
2283 ULONGEST xfered_len;
2284 enum target_xfer_status status;
2285
2286 status = target_write_partial (ops, object, annex,
2287 (gdb_byte *) buf + xfered,
2288 offset + xfered, len - xfered,
2289 &xfered_len);
2290
2291 if (status == TARGET_XFER_EOF)
2292 return xfered;
2293 if (TARGET_XFER_STATUS_ERROR_P (status))
2294 return -1;
2295
2296 gdb_assert (status == TARGET_XFER_OK);
2297 if (progress)
2298 (*progress) (xfered_len, baton);
2299
2300 xfered += xfered_len;
2301 QUIT;
2302 }
2303 return len;
2304 }
2305
/* For docs on target_write see target.h.  Convenience wrapper around
   target_write_with_progress with no progress callback.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2317
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.

   NOTE: on error TARGET_XFER_E_IO is returned, and when the object is
   empty 0 is returned; in both cases *BUF_P is NOT written, so
   callers must check the return value before touching *BUF_P.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Reserve PADDING bytes at the tail of the buffer for the
	 caller (e.g. a terminating NUL in target_read_stralloc).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2380
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  Public
   wrapper around target_read_alloc_1 with no padding.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2391
2392 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2393 returned as a string, allocated using xmalloc. If an error occurs
2394 or the transfer is unsupported, NULL is returned. Empty objects
2395 are returned as allocated but empty strings. A warning is issued
2396 if the result contains any embedded NUL bytes. */
2397
2398 char *
2399 target_read_stralloc (struct target_ops *ops, enum target_object object,
2400 const char *annex)
2401 {
2402 gdb_byte *buffer;
2403 char *bufstr;
2404 LONGEST i, transferred;
2405
2406 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2407 bufstr = (char *) buffer;
2408
2409 if (transferred < 0)
2410 return NULL;
2411
2412 if (transferred == 0)
2413 return xstrdup ("");
2414
2415 bufstr[transferred] = 0;
2416
2417 /* Check for embedded NUL bytes; but allow trailing NULs. */
2418 for (i = strlen (bufstr); i < transferred; i++)
2419 if (bufstr[i] != 0)
2420 {
2421 warning (_("target object %d, annex %s, "
2422 "contained unexpected null characters"),
2423 (int) object, annex ? annex : "(none)");
2424 break;
2425 }
2426
2427 return bufstr;
2428 }
2429
2430 /* Memory transfer methods. */
2431
/* Read LEN bytes at ADDR from the possibly non-current target OPS
   into BUF, raising a memory error on any short read.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}
2444
2445 ULONGEST
2446 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2447 int len, enum bfd_endian byte_order)
2448 {
2449 gdb_byte buf[sizeof (ULONGEST)];
2450
2451 gdb_assert (len <= sizeof (buf));
2452 get_target_memory (ops, addr, buf, len);
2453 return extract_unsigned_integer (buf, len, byte_order);
2454 }
2455
/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" setting; a non-zero return
     reports failure to the caller.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2471
/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2491
/* Command callback: print, for each pushed target that provides
   memory (above the dummy stratum), its long name and file info,
   noting once we reach a target that supplies all memory.  ARGS and
   FROM_TTY are unused.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Skip targets that contribute no memory.  */
      if (!(*t->to_has_memory) (t))
	continue;

      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* Once a higher target claimed all memory, the ones below are
	 not consulted while the inferior runs.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2517
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Force the agent's capabilities to be re-queried for the new
     inferior.  */
  agent_capability_invalidate ();
}
2558
2559 /* Callback for iterate_over_inferiors. Gets rid of the given
2560 inferior. */
2561
2562 static int
2563 dispose_inferior (struct inferior *inf, void *args)
2564 {
2565 struct thread_info *thread;
2566
2567 thread = any_thread_of_process (inf->pid);
2568 if (thread)
2569 {
2570 switch_to_thread (thread->ptid);
2571
2572 /* Core inferiors actually should be detached, not killed. */
2573 if (target_has_execution)
2574 target_kill ();
2575 else
2576 target_detach (NULL, 0);
2577 }
2578
2579 return 0;
2580 }
2581
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* If anything is being debugged, confirm with the user (when
     interactive and live) before disposing of all inferiors.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
2608
2609 /* Detach a target after doing deferred register stores. */
2610
2611 void
2612 target_detach (const char *args, int from_tty)
2613 {
2614 struct target_ops* t;
2615
2616 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2617 /* Don't remove global breakpoints here. They're removed on
2618 disconnection from the target. */
2619 ;
2620 else
2621 /* If we're in breakpoints-always-inserted mode, have to remove
2622 them before detaching. */
2623 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2624
2625 prepare_for_detach ();
2626
2627 current_target.to_detach (&current_target, args, from_tty);
2628 if (targetdebug)
2629 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2630 args, from_tty);
2631 }
2632
/* Disconnect from the current target without resuming it, delegating
   to the first target on the stack that implements to_disconnect.
   Complains (tcomplain) if no target supports disconnecting.  */

void
target_disconnect (char *args, int from_tty)
{
  struct target_ops *t;

  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disconnect != NULL)
      {
	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
			      args, from_tty);
	t->to_disconnect (t, args, from_tty);
	return;
      }

  tcomplain ();
}
2655
2656 ptid_t
2657 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2658 {
2659 struct target_ops *t;
2660 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2661 status, options);
2662
2663 if (targetdebug)
2664 {
2665 char *status_string;
2666 char *options_string;
2667
2668 status_string = target_waitstatus_to_string (status);
2669 options_string = target_options_to_string (options);
2670 fprintf_unfiltered (gdb_stdlog,
2671 "target_wait (%d, status, options={%s})"
2672 " = %d, %s\n",
2673 ptid_get_pid (ptid), options_string,
2674 ptid_get_pid (retval), status_string);
2675 xfree (status_string);
2676 xfree (options_string);
2677 }
2678
2679 return retval;
2680 }
2681
2682 char *
2683 target_pid_to_str (ptid_t ptid)
2684 {
2685 struct target_ops *t;
2686
2687 for (t = current_target.beneath; t != NULL; t = t->beneath)
2688 {
2689 if (t->to_pid_to_str != NULL)
2690 return (*t->to_pid_to_str) (t, ptid);
2691 }
2692
2693 return normal_pid_to_str (ptid);
2694 }
2695
2696 char *
2697 target_thread_name (struct thread_info *info)
2698 {
2699 struct target_ops *t;
2700
2701 for (t = current_target.beneath; t != NULL; t = t->beneath)
2702 {
2703 if (t->to_thread_name != NULL)
2704 return (*t->to_thread_name) (t, info);
2705 }
2706
2707 return NULL;
2708 }
2709
2710 void
2711 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2712 {
2713 struct target_ops *t;
2714
2715 target_dcache_invalidate ();
2716
2717 current_target.to_resume (&current_target, ptid, step, signal);
2718 if (targetdebug)
2719 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2720 ptid_get_pid (ptid),
2721 step ? "step" : "continue",
2722 gdb_signal_to_name (signal));
2723
2724 registers_changed_ptid (ptid);
2725 set_executing (ptid, 1);
2726 set_running (ptid, 1);
2727 clear_inline_frame_state (ptid);
2728 }
2729
/* Pass the NUMSIGS-element PASS_SIGNALS array (indexed by gdb_signal,
   non-zero entries selected) to the first target on the stack that
   implements to_pass_signals.  Silently does nothing if no target
   does.  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_pass_signals != NULL)
	{
	  if (targetdebug)
	    {
	      int i;

	      fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
				  numsigs);

	      /* Only the selected (non-zero) signals are logged.  */
	      for (i = 0; i < numsigs; i++)
		if (pass_signals[i])
		  fprintf_unfiltered (gdb_stdlog, " %s",
				      gdb_signal_to_name (i));

	      fprintf_unfiltered (gdb_stdlog, " })\n");
	    }

	  (*t->to_pass_signals) (t, numsigs, pass_signals);
	  return;
	}
    }
}
2759
/* Pass the NUMSIGS-element PROGRAM_SIGNALS array (indexed by
   gdb_signal, non-zero entries selected) to the first target on the
   stack that implements to_program_signals.  Silently does nothing if
   no target does.  Mirrors target_pass_signals above.  */

void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_program_signals != NULL)
	{
	  if (targetdebug)
	    {
	      int i;

	      fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
				  numsigs);

	      /* Only the selected (non-zero) signals are logged.  */
	      for (i = 0; i < numsigs; i++)
		if (program_signals[i])
		  fprintf_unfiltered (gdb_stdlog, " %s",
				      gdb_signal_to_name (i));

	      fprintf_unfiltered (gdb_stdlog, " })\n");
	    }

	  (*t->to_program_signals) (t, numsigs, program_signals);
	  return;
	}
    }
}
2789
/* Look through the list of possible targets for a target that can
   follow forks.  FOLLOW_CHILD and DETACH_FORK are forwarded to the
   first target implementing to_follow_fork; its return value is
   passed back.  It is an internal error to get here when no target
   can follow a fork, since only such a target can have reported a
   fork event.  */

int
target_follow_fork (int follow_child, int detach_fork)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_follow_fork != NULL)
	{
	  int retval = t->to_follow_fork (t, follow_child, detach_fork);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_follow_fork (%d, %d) = %d\n",
				follow_child, detach_fork, retval);
	  return retval;
	}
    }

  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2816
2817 void
2818 target_mourn_inferior (void)
2819 {
2820 struct target_ops *t;
2821
2822 for (t = current_target.beneath; t != NULL; t = t->beneath)
2823 {
2824 if (t->to_mourn_inferior != NULL)
2825 {
2826 t->to_mourn_inferior (t);
2827 if (targetdebug)
2828 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2829
2830 /* We no longer need to keep handles on any of the object files.
2831 Make sure to release them to avoid unnecessarily locking any
2832 of them while we're not actually debugging. */
2833 bfd_cache_close_all ();
2834
2835 return;
2836 }
2837 }
2838
2839 internal_error (__FILE__, __LINE__,
2840 _("could not find a target to follow mourn inferior"));
2841 }
2842
2843 /* Look for a target which can describe architectural features, starting
2844 from TARGET. If we find one, return its description. */
2845
2846 const struct target_desc *
2847 target_read_description (struct target_ops *target)
2848 {
2849 struct target_ops *t;
2850
2851 for (t = target; t != NULL; t = t->beneath)
2852 if (t->to_read_description != NULL)
2853 {
2854 const struct target_desc *tdesc;
2855
2856 tdesc = t->to_read_description (t);
2857 if (tdesc)
2858 return tdesc;
2859 }
2860
2861 return NULL;
2862 }
2863
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).

   Returns 1 if PATTERN was found (address in *FOUND_ADDRP), 0 if not
   found, -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* PATTERN_LEN - 1 extra bytes let a match straddle two chunks.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2971
/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  struct target_ops *t;
  int found;

  /* We don't use INHERIT to set current_target.to_search_memory,
     so we have to scan the target stack and handle targetdebug
     ourselves.  */

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
			hex_string (start_addr));

  /* Find the first target on the stack providing its own
     to_search_memory implementation.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_search_memory != NULL)
      break;

  if (t != NULL)
    {
      found = t->to_search_memory (t, start_addr, search_space_len,
				   pattern, pattern_len, found_addrp);
    }
  else
    {
      /* If a special version of to_search_memory isn't available, use the
	 simple version.  */
      found = simple_search_memory (current_target.beneath,
				    start_addr, search_space_len,
				    pattern, pattern_len, found_addrp);
    }

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "  = %d\n", found);

  return found;
}
3018
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      /* The first non-skippable target that cannot create an
	 inferior makes "run" impossible.  */
      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
3054
3055 /* Look through the list of possible targets for a target that can
3056 execute a run or attach command without any other data. This is
3057 used to locate the default process stratum.
3058
3059 If DO_MESG is not NULL, the result is always valid (error() is
3060 called for errors); else, return NULL on error. */
3061
3062 static struct target_ops *
3063 find_default_run_target (char *do_mesg)
3064 {
3065 struct target_ops **t;
3066 struct target_ops *runable = NULL;
3067 int count;
3068
3069 count = 0;
3070
3071 for (t = target_structs; t < target_structs + target_struct_size;
3072 ++t)
3073 {
3074 if ((*t)->to_can_run && target_can_run (*t))
3075 {
3076 runable = *t;
3077 ++count;
3078 }
3079 }
3080
3081 if (count != 1)
3082 {
3083 if (do_mesg)
3084 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3085 else
3086 return NULL;
3087 }
3088
3089 return runable;
3090 }
3091
3092 void
3093 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3094 {
3095 struct target_ops *t;
3096
3097 t = find_default_run_target ("attach");
3098 (t->to_attach) (t, args, from_tty);
3099 return;
3100 }
3101
3102 void
3103 find_default_create_inferior (struct target_ops *ops,
3104 char *exec_file, char *allargs, char **env,
3105 int from_tty)
3106 {
3107 struct target_ops *t;
3108
3109 t = find_default_run_target ("run");
3110 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3111 return;
3112 }
3113
/* Default to_can_async_p implementation: ask the default run target,
   skipping it if its method is just the delegator placeholder.
   IGNORE is unused.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}
3128
/* Default to_is_async_p implementation: ask the default run target,
   skipping it if its method is just the delegator placeholder.
   IGNORE is unused.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
3143
3144 static int
3145 find_default_supports_non_stop (struct target_ops *self)
3146 {
3147 struct target_ops *t;
3148
3149 t = find_default_run_target (NULL);
3150 if (t && t->to_supports_non_stop)
3151 return (t->to_supports_non_stop) (t);
3152 return 0;
3153 }
3154
3155 int
3156 target_supports_non_stop (void)
3157 {
3158 struct target_ops *t;
3159
3160 for (t = &current_target; t != NULL; t = t->beneath)
3161 if (t->to_supports_non_stop)
3162 return t->to_supports_non_stop (t);
3163
3164 return 0;
3165 }
3166
/* Implement the "info proc" command.  Returns 1 if a target handled
   the request (via to_info_proc), 0 if no target could.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
3198
3199 static int
3200 find_default_supports_disable_randomization (struct target_ops *self)
3201 {
3202 struct target_ops *t;
3203
3204 t = find_default_run_target (NULL);
3205 if (t && t->to_supports_disable_randomization)
3206 return (t->to_supports_disable_randomization) (t);
3207 return 0;
3208 }
3209
3210 int
3211 target_supports_disable_randomization (void)
3212 {
3213 struct target_ops *t;
3214
3215 for (t = &current_target; t != NULL; t = t->beneath)
3216 if (t->to_supports_disable_randomization)
3217 return t->to_supports_disable_randomization (t);
3218
3219 return 0;
3220 }
3221
3222 char *
3223 target_get_osdata (const char *type)
3224 {
3225 struct target_ops *t;
3226
3227 /* If we're already connected to something that can get us OS
3228 related data, use it. Otherwise, try using the native
3229 target. */
3230 if (current_target.to_stratum >= process_stratum)
3231 t = current_target.beneath;
3232 else
3233 t = find_default_run_target ("get OS data");
3234
3235 if (!t)
3236 return NULL;
3237
3238 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3239 }
3240
/* Determine the current address space of thread PTID.  Never returns
   NULL: if no target answers and the inferior has no address space
   either, an internal error is raised.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the targets on the stack, topmost first; the first one
     implementing the method answers.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* Implementations must not return NULL.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  /* No inferior (or none with an address space) means there is nothing
     sensible to return; treat that as a GDB bug, not a user error.  */
  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3277
3278
3279 /* Target file operations. */
3280
3281 static struct target_ops *
3282 default_fileio_target (void)
3283 {
3284 /* If we're already connected to something that can perform
3285 file I/O, use it. Otherwise, try using the native target. */
3286 if (current_target.to_stratum >= process_stratum)
3287 return current_target.beneath;
3288 else
3289 return find_default_run_target ("file I/O");
3290 }
3291
3292 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3293 target file descriptor, or -1 if an error occurs (and set
3294 *TARGET_ERRNO). */
3295 int
3296 target_fileio_open (const char *filename, int flags, int mode,
3297 int *target_errno)
3298 {
3299 struct target_ops *t;
3300
3301 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3302 {
3303 if (t->to_fileio_open != NULL)
3304 {
3305 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3306
3307 if (targetdebug)
3308 fprintf_unfiltered (gdb_stdlog,
3309 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3310 filename, flags, mode,
3311 fd, fd != -1 ? 0 : *target_errno);
3312 return fd;
3313 }
3314 }
3315
3316 *target_errno = FILEIO_ENOSYS;
3317 return -1;
3318 }
3319
3320 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3321 Return the number of bytes written, or -1 if an error occurs
3322 (and set *TARGET_ERRNO). */
3323 int
3324 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3325 ULONGEST offset, int *target_errno)
3326 {
3327 struct target_ops *t;
3328
3329 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3330 {
3331 if (t->to_fileio_pwrite != NULL)
3332 {
3333 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3334 target_errno);
3335
3336 if (targetdebug)
3337 fprintf_unfiltered (gdb_stdlog,
3338 "target_fileio_pwrite (%d,...,%d,%s) "
3339 "= %d (%d)\n",
3340 fd, len, pulongest (offset),
3341 ret, ret != -1 ? 0 : *target_errno);
3342 return ret;
3343 }
3344 }
3345
3346 *target_errno = FILEIO_ENOSYS;
3347 return -1;
3348 }
3349
3350 /* Read up to LEN bytes FD on the target into READ_BUF.
3351 Return the number of bytes read, or -1 if an error occurs
3352 (and set *TARGET_ERRNO). */
3353 int
3354 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3355 ULONGEST offset, int *target_errno)
3356 {
3357 struct target_ops *t;
3358
3359 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3360 {
3361 if (t->to_fileio_pread != NULL)
3362 {
3363 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3364 target_errno);
3365
3366 if (targetdebug)
3367 fprintf_unfiltered (gdb_stdlog,
3368 "target_fileio_pread (%d,...,%d,%s) "
3369 "= %d (%d)\n",
3370 fd, len, pulongest (offset),
3371 ret, ret != -1 ? 0 : *target_errno);
3372 return ret;
3373 }
3374 }
3375
3376 *target_errno = FILEIO_ENOSYS;
3377 return -1;
3378 }
3379
3380 /* Close FD on the target. Return 0, or -1 if an error occurs
3381 (and set *TARGET_ERRNO). */
3382 int
3383 target_fileio_close (int fd, int *target_errno)
3384 {
3385 struct target_ops *t;
3386
3387 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3388 {
3389 if (t->to_fileio_close != NULL)
3390 {
3391 int ret = t->to_fileio_close (t, fd, target_errno);
3392
3393 if (targetdebug)
3394 fprintf_unfiltered (gdb_stdlog,
3395 "target_fileio_close (%d) = %d (%d)\n",
3396 fd, ret, ret != -1 ? 0 : *target_errno);
3397 return ret;
3398 }
3399 }
3400
3401 *target_errno = FILEIO_ENOSYS;
3402 return -1;
3403 }
3404
3405 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3406 occurs (and set *TARGET_ERRNO). */
3407 int
3408 target_fileio_unlink (const char *filename, int *target_errno)
3409 {
3410 struct target_ops *t;
3411
3412 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3413 {
3414 if (t->to_fileio_unlink != NULL)
3415 {
3416 int ret = t->to_fileio_unlink (t, filename, target_errno);
3417
3418 if (targetdebug)
3419 fprintf_unfiltered (gdb_stdlog,
3420 "target_fileio_unlink (%s) = %d (%d)\n",
3421 filename, ret, ret != -1 ? 0 : *target_errno);
3422 return ret;
3423 }
3424 }
3425
3426 *target_errno = FILEIO_ENOSYS;
3427 return -1;
3428 }
3429
3430 /* Read value of symbolic link FILENAME on the target. Return a
3431 null-terminated string allocated via xmalloc, or NULL if an error
3432 occurs (and set *TARGET_ERRNO). */
3433 char *
3434 target_fileio_readlink (const char *filename, int *target_errno)
3435 {
3436 struct target_ops *t;
3437
3438 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3439 {
3440 if (t->to_fileio_readlink != NULL)
3441 {
3442 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3443
3444 if (targetdebug)
3445 fprintf_unfiltered (gdb_stdlog,
3446 "target_fileio_readlink (%s) = %s (%d)\n",
3447 filename, ret? ret : "(nil)",
3448 ret? 0 : *target_errno);
3449 return ret;
3450 }
3451 }
3452
3453 *target_errno = FILEIO_ENOSYS;
3454 return NULL;
3455 }
3456
/* Cleanup callback closing the target file descriptor pointed to by
   OPAQUE; any close error is deliberately ignored.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int ignored_errno;
  int fd = *(int *) opaque;

  target_fileio_close (fd, &ignored_errno);
}
3465
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  Returns -1 on error; on a zero-length transfer *BUF_P
   is left unset (no buffer is handed to the caller).  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;	/* Buffer capacity / bytes read so far.  */
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path (error, EOF, quit).  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always leave PADDING bytes of headroom at the end of the
	 buffer for the caller (e.g. room for a NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Let the user interrupt a long transfer.  */
      QUIT;
    }
}
3529
3530 /* Read target file FILENAME. Store the result in *BUF_P and return
3531 the size of the transferred data. See the declaration in "target.h"
3532 function for more information about the return value. */
3533
3534 LONGEST
3535 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3536 {
3537 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3538 }
3539
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding: that guarantees room for the
     terminating NUL stored below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* On a zero-length transfer no buffer was allocated for us, so
     return a fresh empty string.  */
  if (transferred == 0)
    return xstrdup ("");

  /* Write the NUL into the padding byte reserved above.  */
  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3576
3577
3578 static int
3579 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3580 CORE_ADDR addr, int len)
3581 {
3582 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3583 }
3584
3585 static int
3586 default_watchpoint_addr_within_range (struct target_ops *target,
3587 CORE_ADDR addr,
3588 CORE_ADDR start, int length)
3589 {
3590 return addr >= start && addr < start + length;
3591 }
3592
3593 static struct gdbarch *
3594 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3595 {
3596 return target_gdbarch ();
3597 }
3598
/* Generic stub: always answer 0.  Used (through casts) to fill
   boolean-style slots of the dummy target vector.  */

static int
return_zero (void)
{
  return 0;
}
3604
/* Generic stub: always answer 1.  */

static int
return_one (void)
{
  return 1;
}
3610
/* Generic stub: always answer -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3616
/* Generic stub: always answer a null pointer.  */

static void *
return_null (void)
{
  return NULL;
}
3622
3623 /*
3624 * Find the next target down the stack from the specified target.
3625 */
3626
3627 struct target_ops *
3628 find_target_beneath (struct target_ops *t)
3629 {
3630 return t->beneath;
3631 }
3632
3633 /* See target.h. */
3634
3635 struct target_ops *
3636 find_target_at (enum strata stratum)
3637 {
3638 struct target_ops *t;
3639
3640 for (t = current_target.beneath; t != NULL; t = t->beneath)
3641 if (t->to_stratum == stratum)
3642 return t;
3643
3644 return NULL;
3645 }
3646
3647 \f
/* The inferior process has died.  Long live the inferior!
   Generic teardown shared by targets: forget the current thread,
   discard the inferior's breakpoints/threads/registers and reset
   caches.  The order of the calls below is significant.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save the dead inferior's ptid and clear inferior_ptid first, so
     none of the teardown below runs with a stale current thread.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Invalidate all cached register values.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give interested UIs a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3682 \f
3683 /* Convert a normal process ID to a string. Returns the string in a
3684 static buffer. */
3685
3686 char *
3687 normal_pid_to_str (ptid_t ptid)
3688 {
3689 static char buf[32];
3690
3691 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3692 return buf;
3693 }
3694
3695 static char *
3696 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3697 {
3698 return normal_pid_to_str (ptid);
3699 }
3700
3701 /* Error-catcher for target_find_memory_regions. */
3702 static int
3703 dummy_find_memory_regions (struct target_ops *self,
3704 find_memory_region_ftype ignore1, void *ignore2)
3705 {
3706 error (_("Command not implemented for this target."));
3707 return 0;
3708 }
3709
3710 /* Error-catcher for target_make_corefile_notes. */
3711 static char *
3712 dummy_make_corefile_notes (struct target_ops *self,
3713 bfd *ignore1, int *ignore2)
3714 {
3715 error (_("Command not implemented for this target."));
3716 return NULL;
3717 }
3718
3719 /* Error-catcher for target_get_bookmark. */
3720 static gdb_byte *
3721 dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
3722 {
3723 tcomplain ();
3724 return NULL;
3725 }
3726
3727 /* Error-catcher for target_goto_bookmark. */
3728 static void
3729 dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
3730 {
3731 tcomplain ();
3732 }
3733
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom stratum and mostly
   answers "no" or raises an error when asked to do anything.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "run" with no target connected goes through the default-run
     machinery to pick a suitable native target.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* Operations that only make sense with a live target raise an
     explicit error instead of silently doing nothing.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The casts adapt the argument-less return_zero stub to each slot's
     exact prototype; the extra arguments are simply ignored.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Every slot not set above gets its auto-generated default.  */
  install_dummy_methods (&dummy_target);
}
3763 \f
3764 static void
3765 debug_to_open (char *args, int from_tty)
3766 {
3767 debug_target.to_open (args, from_tty);
3768
3769 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3770 }
3771
/* Close the target TARG.  TARG must already have been unpushed from
   the target stack.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* Prefer to_xclose when provided; NOTE(review): to_xclose appears to
     be the variant that also deallocates the target, so TARG must not
     be touched after the call — confirm against target.h.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3785
3786 void
3787 target_attach (char *args, int from_tty)
3788 {
3789 current_target.to_attach (&current_target, args, from_tty);
3790 if (targetdebug)
3791 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3792 args, from_tty);
3793 }
3794
3795 int
3796 target_thread_alive (ptid_t ptid)
3797 {
3798 struct target_ops *t;
3799
3800 for (t = current_target.beneath; t != NULL; t = t->beneath)
3801 {
3802 if (t->to_thread_alive != NULL)
3803 {
3804 int retval;
3805
3806 retval = t->to_thread_alive (t, ptid);
3807 if (targetdebug)
3808 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3809 ptid_get_pid (ptid), retval);
3810
3811 return retval;
3812 }
3813 }
3814
3815 return 0;
3816 }
3817
3818 void
3819 target_find_new_threads (void)
3820 {
3821 struct target_ops *t;
3822
3823 for (t = current_target.beneath; t != NULL; t = t->beneath)
3824 {
3825 if (t->to_find_new_threads != NULL)
3826 {
3827 t->to_find_new_threads (t);
3828 if (targetdebug)
3829 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3830
3831 return;
3832 }
3833 }
3834 }
3835
3836 void
3837 target_stop (ptid_t ptid)
3838 {
3839 if (!may_stop)
3840 {
3841 warning (_("May not interrupt or stop the target, ignoring attempt"));
3842 return;
3843 }
3844
3845 (*current_target.to_stop) (&current_target, ptid);
3846 }
3847
3848 static void
3849 debug_to_post_attach (struct target_ops *self, int pid)
3850 {
3851 debug_target.to_post_attach (&debug_target, pid);
3852
3853 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3854 }
3855
3856 /* Concatenate ELEM to LIST, a comma separate list, and return the
3857 result. The LIST incoming argument is released. */
3858
3859 static char *
3860 str_comma_list_concat_elem (char *list, const char *elem)
3861 {
3862 if (list == NULL)
3863 return xstrdup (elem);
3864 else
3865 return reconcat (list, list, ", ", elem, (char *) NULL);
3866 }
3867
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int is_set = (*target_options & opt) != 0;

  if (is_set)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3885
/* Return TARGET_OPTIONS rendered as a comma-separated list of option
   names, in a freshly xmalloc'd string the caller must free.  Bits
   that do not correspond to a known option are reported as
   "unknown???".  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append #OPT to RET when OPT is set, and clear it from
     TARGET_OPTIONS so leftover bits can be detected below.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bit still set was not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3903
/* Print a register-access trace line to gdb_stdlog on behalf of FUNC:
   the register's name (or number), its raw bytes, and — when the
   register fits in a LONGEST — its value rendered both as an address
   and as a decimal number.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw (non-pseudo) registers get their contents dumped.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target memory order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Registers wider than a LONGEST only get the byte dump.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3940
3941 void
3942 target_fetch_registers (struct regcache *regcache, int regno)
3943 {
3944 struct target_ops *t;
3945
3946 for (t = current_target.beneath; t != NULL; t = t->beneath)
3947 {
3948 if (t->to_fetch_registers != NULL)
3949 {
3950 t->to_fetch_registers (t, regcache, regno);
3951 if (targetdebug)
3952 debug_print_register ("target_fetch_registers", regcache, regno);
3953 return;
3954 }
3955 }
3956 }
3957
3958 void
3959 target_store_registers (struct regcache *regcache, int regno)
3960 {
3961 struct target_ops *t;
3962
3963 if (!may_write_registers)
3964 error (_("Writing to registers is not allowed (regno %d)"), regno);
3965
3966 current_target.to_store_registers (&current_target, regcache, regno);
3967 if (targetdebug)
3968 {
3969 debug_print_register ("target_store_registers", regcache, regno);
3970 }
3971 }
3972
3973 int
3974 target_core_of_thread (ptid_t ptid)
3975 {
3976 struct target_ops *t;
3977
3978 for (t = current_target.beneath; t != NULL; t = t->beneath)
3979 {
3980 if (t->to_core_of_thread != NULL)
3981 {
3982 int retval = t->to_core_of_thread (t, ptid);
3983
3984 if (targetdebug)
3985 fprintf_unfiltered (gdb_stdlog,
3986 "target_core_of_thread (%d) = %d\n",
3987 ptid_get_pid (ptid), retval);
3988 return retval;
3989 }
3990 }
3991
3992 return -1;
3993 }
3994
3995 int
3996 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3997 {
3998 struct target_ops *t;
3999
4000 for (t = current_target.beneath; t != NULL; t = t->beneath)
4001 {
4002 if (t->to_verify_memory != NULL)
4003 {
4004 int retval = t->to_verify_memory (t, data, memaddr, size);
4005
4006 if (targetdebug)
4007 fprintf_unfiltered (gdb_stdlog,
4008 "target_verify_memory (%s, %s) = %d\n",
4009 paddress (target_gdbarch (), memaddr),
4010 pulongest (size),
4011 retval);
4012 return retval;
4013 }
4014 }
4015
4016 tcomplain ();
4017 }
4018
4019 /* The documentation for this function is in its prototype declaration in
4020 target.h. */
4021
4022 int
4023 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4024 {
4025 struct target_ops *t;
4026
4027 for (t = current_target.beneath; t != NULL; t = t->beneath)
4028 if (t->to_insert_mask_watchpoint != NULL)
4029 {
4030 int ret;
4031
4032 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4033
4034 if (targetdebug)
4035 fprintf_unfiltered (gdb_stdlog, "\
4036 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4037 core_addr_to_string (addr),
4038 core_addr_to_string (mask), rw, ret);
4039
4040 return ret;
4041 }
4042
4043 return 1;
4044 }
4045
4046 /* The documentation for this function is in its prototype declaration in
4047 target.h. */
4048
4049 int
4050 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4051 {
4052 struct target_ops *t;
4053
4054 for (t = current_target.beneath; t != NULL; t = t->beneath)
4055 if (t->to_remove_mask_watchpoint != NULL)
4056 {
4057 int ret;
4058
4059 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4060
4061 if (targetdebug)
4062 fprintf_unfiltered (gdb_stdlog, "\
4063 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4064 core_addr_to_string (addr),
4065 core_addr_to_string (mask), rw, ret);
4066
4067 return ret;
4068 }
4069
4070 return 1;
4071 }
4072
4073 /* The documentation for this function is in its prototype declaration
4074 in target.h. */
4075
4076 int
4077 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4078 {
4079 struct target_ops *t;
4080
4081 for (t = current_target.beneath; t != NULL; t = t->beneath)
4082 if (t->to_masked_watch_num_registers != NULL)
4083 return t->to_masked_watch_num_registers (t, addr, mask);
4084
4085 return -1;
4086 }
4087
4088 /* The documentation for this function is in its prototype declaration
4089 in target.h. */
4090
4091 int
4092 target_ranged_break_num_registers (void)
4093 {
4094 struct target_ops *t;
4095
4096 for (t = current_target.beneath; t != NULL; t = t->beneath)
4097 if (t->to_ranged_break_num_registers != NULL)
4098 return t->to_ranged_break_num_registers (t);
4099
4100 return -1;
4101 }
4102
4103 /* See target.h. */
4104
4105 struct btrace_target_info *
4106 target_enable_btrace (ptid_t ptid)
4107 {
4108 struct target_ops *t;
4109
4110 for (t = current_target.beneath; t != NULL; t = t->beneath)
4111 if (t->to_enable_btrace != NULL)
4112 return t->to_enable_btrace (t, ptid);
4113
4114 tcomplain ();
4115 return NULL;
4116 }
4117
4118 /* See target.h. */
4119
4120 void
4121 target_disable_btrace (struct btrace_target_info *btinfo)
4122 {
4123 struct target_ops *t;
4124
4125 for (t = current_target.beneath; t != NULL; t = t->beneath)
4126 if (t->to_disable_btrace != NULL)
4127 {
4128 t->to_disable_btrace (t, btinfo);
4129 return;
4130 }
4131
4132 tcomplain ();
4133 }
4134
4135 /* See target.h. */
4136
4137 void
4138 target_teardown_btrace (struct btrace_target_info *btinfo)
4139 {
4140 struct target_ops *t;
4141
4142 for (t = current_target.beneath; t != NULL; t = t->beneath)
4143 if (t->to_teardown_btrace != NULL)
4144 {
4145 t->to_teardown_btrace (t, btinfo);
4146 return;
4147 }
4148
4149 tcomplain ();
4150 }
4151
4152 /* See target.h. */
4153
4154 enum btrace_error
4155 target_read_btrace (VEC (btrace_block_s) **btrace,
4156 struct btrace_target_info *btinfo,
4157 enum btrace_read_type type)
4158 {
4159 struct target_ops *t;
4160
4161 for (t = current_target.beneath; t != NULL; t = t->beneath)
4162 if (t->to_read_btrace != NULL)
4163 return t->to_read_btrace (t, btrace, btinfo, type);
4164
4165 tcomplain ();
4166 return BTRACE_ERR_NOT_SUPPORTED;
4167 }
4168
4169 /* See target.h. */
4170
4171 void
4172 target_stop_recording (void)
4173 {
4174 struct target_ops *t;
4175
4176 for (t = current_target.beneath; t != NULL; t = t->beneath)
4177 if (t->to_stop_recording != NULL)
4178 {
4179 t->to_stop_recording (t);
4180 return;
4181 }
4182
4183 /* This is optional. */
4184 }
4185
4186 /* See target.h. */
4187
4188 void
4189 target_info_record (void)
4190 {
4191 struct target_ops *t;
4192
4193 for (t = current_target.beneath; t != NULL; t = t->beneath)
4194 if (t->to_info_record != NULL)
4195 {
4196 t->to_info_record (t);
4197 return;
4198 }
4199
4200 tcomplain ();
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_save_record (const char *filename)
4207 {
4208 struct target_ops *t;
4209
4210 for (t = current_target.beneath; t != NULL; t = t->beneath)
4211 if (t->to_save_record != NULL)
4212 {
4213 t->to_save_record (t, filename);
4214 return;
4215 }
4216
4217 tcomplain ();
4218 }
4219
4220 /* See target.h. */
4221
4222 int
4223 target_supports_delete_record (void)
4224 {
4225 struct target_ops *t;
4226
4227 for (t = current_target.beneath; t != NULL; t = t->beneath)
4228 if (t->to_delete_record != NULL)
4229 return 1;
4230
4231 return 0;
4232 }
4233
4234 /* See target.h. */
4235
4236 void
4237 target_delete_record (void)
4238 {
4239 struct target_ops *t;
4240
4241 for (t = current_target.beneath; t != NULL; t = t->beneath)
4242 if (t->to_delete_record != NULL)
4243 {
4244 t->to_delete_record (t);
4245 return;
4246 }
4247
4248 tcomplain ();
4249 }
4250
4251 /* See target.h. */
4252
4253 int
4254 target_record_is_replaying (void)
4255 {
4256 struct target_ops *t;
4257
4258 for (t = current_target.beneath; t != NULL; t = t->beneath)
4259 if (t->to_record_is_replaying != NULL)
4260 return t->to_record_is_replaying (t);
4261
4262 return 0;
4263 }
4264
4265 /* See target.h. */
4266
4267 void
4268 target_goto_record_begin (void)
4269 {
4270 struct target_ops *t;
4271
4272 for (t = current_target.beneath; t != NULL; t = t->beneath)
4273 if (t->to_goto_record_begin != NULL)
4274 {
4275 t->to_goto_record_begin (t);
4276 return;
4277 }
4278
4279 tcomplain ();
4280 }
4281
4282 /* See target.h. */
4283
4284 void
4285 target_goto_record_end (void)
4286 {
4287 struct target_ops *t;
4288
4289 for (t = current_target.beneath; t != NULL; t = t->beneath)
4290 if (t->to_goto_record_end != NULL)
4291 {
4292 t->to_goto_record_end (t);
4293 return;
4294 }
4295
4296 tcomplain ();
4297 }
4298
4299 /* See target.h. */
4300
4301 void
4302 target_goto_record (ULONGEST insn)
4303 {
4304 struct target_ops *t;
4305
4306 for (t = current_target.beneath; t != NULL; t = t->beneath)
4307 if (t->to_goto_record != NULL)
4308 {
4309 t->to_goto_record (t, insn);
4310 return;
4311 }
4312
4313 tcomplain ();
4314 }
4315
4316 /* See target.h. */
4317
4318 void
4319 target_insn_history (int size, int flags)
4320 {
4321 struct target_ops *t;
4322
4323 for (t = current_target.beneath; t != NULL; t = t->beneath)
4324 if (t->to_insn_history != NULL)
4325 {
4326 t->to_insn_history (t, size, flags);
4327 return;
4328 }
4329
4330 tcomplain ();
4331 }
4332
4333 /* See target.h. */
4334
4335 void
4336 target_insn_history_from (ULONGEST from, int size, int flags)
4337 {
4338 struct target_ops *t;
4339
4340 for (t = current_target.beneath; t != NULL; t = t->beneath)
4341 if (t->to_insn_history_from != NULL)
4342 {
4343 t->to_insn_history_from (t, from, size, flags);
4344 return;
4345 }
4346
4347 tcomplain ();
4348 }
4349
4350 /* See target.h. */
4351
4352 void
4353 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4354 {
4355 struct target_ops *t;
4356
4357 for (t = current_target.beneath; t != NULL; t = t->beneath)
4358 if (t->to_insn_history_range != NULL)
4359 {
4360 t->to_insn_history_range (t, begin, end, flags);
4361 return;
4362 }
4363
4364 tcomplain ();
4365 }
4366
4367 /* See target.h. */
4368
4369 void
4370 target_call_history (int size, int flags)
4371 {
4372 struct target_ops *t;
4373
4374 for (t = current_target.beneath; t != NULL; t = t->beneath)
4375 if (t->to_call_history != NULL)
4376 {
4377 t->to_call_history (t, size, flags);
4378 return;
4379 }
4380
4381 tcomplain ();
4382 }
4383
4384 /* See target.h. */
4385
4386 void
4387 target_call_history_from (ULONGEST begin, int size, int flags)
4388 {
4389 struct target_ops *t;
4390
4391 for (t = current_target.beneath; t != NULL; t = t->beneath)
4392 if (t->to_call_history_from != NULL)
4393 {
4394 t->to_call_history_from (t, begin, size, flags);
4395 return;
4396 }
4397
4398 tcomplain ();
4399 }
4400
4401 /* See target.h. */
4402
4403 void
4404 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4405 {
4406 struct target_ops *t;
4407
4408 for (t = current_target.beneath; t != NULL; t = t->beneath)
4409 if (t->to_call_history_range != NULL)
4410 {
4411 t->to_call_history_range (t, begin, end, flags);
4412 return;
4413 }
4414
4415 tcomplain ();
4416 }
4417
4418 static void
4419 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4420 {
4421 debug_target.to_prepare_to_store (&debug_target, regcache);
4422
4423 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4424 }
4425
4426 /* See target.h. */
4427
4428 const struct frame_unwind *
4429 target_get_unwinder (void)
4430 {
4431 struct target_ops *t;
4432
4433 for (t = current_target.beneath; t != NULL; t = t->beneath)
4434 if (t->to_get_unwinder != NULL)
4435 return t->to_get_unwinder;
4436
4437 return NULL;
4438 }
4439
4440 /* See target.h. */
4441
4442 const struct frame_unwind *
4443 target_get_tailcall_unwinder (void)
4444 {
4445 struct target_ops *t;
4446
4447 for (t = current_target.beneath; t != NULL; t = t->beneath)
4448 if (t->to_get_tailcall_unwinder != NULL)
4449 return t->to_get_tailcall_unwinder;
4450
4451 return NULL;
4452 }
4453
4454 /* See target.h. */
4455
4456 CORE_ADDR
4457 forward_target_decr_pc_after_break (struct target_ops *ops,
4458 struct gdbarch *gdbarch)
4459 {
4460 for (; ops != NULL; ops = ops->beneath)
4461 if (ops->to_decr_pc_after_break != NULL)
4462 return ops->to_decr_pc_after_break (ops, gdbarch);
4463
4464 return gdbarch_decr_pc_after_break (gdbarch);
4465 }
4466
4467 /* See target.h. */
4468
4469 CORE_ADDR
4470 target_decr_pc_after_break (struct gdbarch *gdbarch)
4471 {
4472 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4473 }
4474
/* Debug wrapper for the deprecated memory-transfer method: delegate to
   the saved real target vector, then log the request and, when bytes
   were actually moved, a hex dump of them.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* Deliberately no trailing newline: the byte dump (if any) continues
     on the same log line.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into lines whenever the HOST buffer address
	     is 16-byte aligned (note: keyed on &myaddr[i], not on the
	     target address).  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, show only the first dump line and
		 elide the rest.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4515
/* Debug wrapper: delegate to_files_info to the saved real target
   vector, then log the call.  TARGET is passed through unchanged.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4523
/* Debug wrapper for to_insert_breakpoint: delegate to the saved real
   target vector, then log the placed address and result.  */

static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4538
/* Debug wrapper for to_remove_breakpoint: delegate to the saved real
   target vector, then log the placed address and result.  */

static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4553
/* Debug wrapper for to_can_use_hw_breakpoint: delegate to the saved
   real target vector, then log arguments and result.  */

static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4571
4572 static int
4573 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4574 CORE_ADDR addr, int len)
4575 {
4576 CORE_ADDR retval;
4577
4578 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4579 addr, len);
4580
4581 fprintf_unfiltered (gdb_stdlog,
4582 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4583 core_addr_to_string (addr), (unsigned long) len,
4584 core_addr_to_string (retval));
4585 return retval;
4586 }
4587
/* Debug wrapper for to_can_accel_watchpoint_condition: delegate to
   the saved real target vector, then log arguments and result.  */

static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4606
/* Debug wrapper for to_stopped_by_watchpoint: delegate to the saved
   real target vector, then log the result.  */

static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4619
/* Debug wrapper for to_stopped_data_address: delegate, then log.
   NOTE(review): unlike sibling wrappers this passes TARGET through
   rather than &debug_target — confirm intentional.  *ADDR is printed
   even when the method returned 0 and may not have set it — confirm
   harmless.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4633
4634 static int
4635 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4636 CORE_ADDR addr,
4637 CORE_ADDR start, int length)
4638 {
4639 int retval;
4640
4641 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4642 start, length);
4643
4644 fprintf_filtered (gdb_stdlog,
4645 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4646 core_addr_to_string (addr), core_addr_to_string (start),
4647 length, retval);
4648 return retval;
4649 }
4650
/* Debug wrapper for to_insert_hw_breakpoint: delegate to the saved
   real target vector, then log the placed address and result.  */

static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4667
/* Debug wrapper for to_remove_hw_breakpoint: delegate to the saved
   real target vector, then log the placed address and result.  */

static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4684
/* Debug wrapper for to_insert_watchpoint: delegate to the saved real
   target vector, then log arguments and result.  */

static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4701
/* Debug wrapper for to_remove_watchpoint: delegate to the saved real
   target vector, then log arguments and result.  */

static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4718
/* Debug wrapper for to_terminal_init: delegate, then log.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4726
/* Debug wrapper for to_terminal_inferior: delegate, then log.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4734
/* Debug wrapper for to_terminal_ours_for_output: delegate, then log.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4742
/* Debug wrapper for to_terminal_ours: delegate, then log.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4750
/* Debug wrapper for to_terminal_save_ours: delegate, then log.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4758
/* Debug wrapper for to_terminal_info: delegate, then log.
   NOTE(review): ARG is printed with %s and may be NULL here — confirm
   callers always pass a string.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4768
/* Debug wrapper for to_load: delegate, then log the arguments.
   NOTE(review): ARGS is printed with %s and may be NULL — confirm.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4776
/* Debug wrapper for to_post_startup_inferior: delegate, then log the
   pid component of PTID.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4785
/* Debug wrapper for to_insert_fork_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4798
/* Debug wrapper for to_remove_fork_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4811
/* Debug wrapper for to_insert_vfork_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4824
/* Debug wrapper for to_remove_vfork_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4837
/* Debug wrapper for to_insert_exec_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4850
/* Debug wrapper for to_remove_exec_catchpoint: delegate to the saved
   real target vector, then log pid and result.  */

static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4863
/* Debug wrapper for to_has_exited: delegate, then log.
   NOTE(review): *EXIT_STATUS is printed unconditionally, even when
   the method reported "has not exited" and may not have set it —
   confirm harmless.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4878
/* Debug wrapper for to_can_run: delegate to the saved real target
   vector, then log the result.  */

static int
debug_to_can_run (struct target_ops *self)
{
  int retval;

  retval = debug_target.to_can_run (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
4890
/* Debug wrapper for to_thread_architecture: delegate, then log the
   resulting gdbarch pointer and its printable architecture name.
   NOTE(review): passes OPS through rather than &debug_target as most
   sibling wrappers do — confirm intentional.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4905
/* Debug wrapper for to_stop: delegate, then log the ptid.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4914
/* Debug wrapper for to_rcmd: delegate, then log the command string.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4922
4923 static char *
4924 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4925 {
4926 char *exec_file;
4927
4928 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4929
4930 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4931 pid, exec_file);
4932
4933 return exec_file;
4934 }
4935
/* Install the debug-logging wrappers: first save a byte copy of the
   real current target vector in DEBUG_TARGET, then overwrite selected
   methods of CURRENT_TARGET with the debug_to_* wrappers above.  Each
   wrapper delegates to the saved copy and logs to gdb_stdlog.  The
   memcpy must happen before any assignment, or the wrappers would
   recurse into themselves.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4983 \f
4984
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4989
/* Default to_rcmd implementation: targets with no monitor interface
   simply report the "monitor" command as unsupported.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4995
/* Implement the "monitor" CLI command: forward CMD verbatim to the
   target's rcmd method, with output going to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
5002
5003 /* Print the name of each layers of our target stack. */
5004
5005 static void
5006 maintenance_print_target_stack (char *cmd, int from_tty)
5007 {
5008 struct target_ops *t;
5009
5010 printf_filtered (_("The current target stack is:\n"));
5011
5012 for (t = target_stack; t != NULL; t = t->beneath)
5013 {
5014 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5015 }
5016 }
5017
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  The two are
   re-synchronized by set_target_async_command below.  */
static int target_async_permitted_1 = 0;
5024
/* "set target-async" callback: refuse to change the setting while an
   inferior is live (reverting the user-visible shadow variable so
   "show" stays truthful), otherwise commit the shadow value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Roll the shadow back before erroring, so the set command has
	 no visible effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5037
/* "show target-async" callback: print the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5047
/* Temporary copies of permission settings.  The set/show commands
   operate on these shadows; they are committed to the real may_*
   variables only when changing them is allowed (see
   set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5056
5057 /* Make the user-set values match the real values again. */
5058
/* Make the user-set values match the real values again, discarding
   any pending (not-yet-committed) user changes to the shadows.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5069
5070 /* The one function handles (most of) the permission flags in the same
5071 way. */
5072
/* Common "set may-*" callback: refuse changes while the inferior is
   running (reverting the shadows first), otherwise commit the shadow
   values and recompute observer mode.  may_write_memory is
   deliberately not handled here — it has its own callback
   (set_write_memory_permission) so it stays independent of observer
   mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5091
5092 /* Set memory write permission independently of observer mode. */
5093
/* "set may-write-memory" callback.  Set memory write permission
   independently of observer mode; unlike set_target_permissions this
   is allowed even while the inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5102
5103
/* Module initialization: push the always-present dummy target and
   register all target-related commands and set/show options.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so the stack is
     never empty.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same report.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission flags all share set_target_permissions,
     except may-write-memory, which has its own handler so it can be
     changed independently of observer mode.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}