Add "volatile" keyword to "struct gdb_exception" declaration
deliverable/binutils-gdb.git: gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 static void *return_null (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static target_xfer_partial_ftype current_xfer_partial;
78
79 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
80 ptid_t ptid);
81
82 static void init_dummy_target (void);
83
84 static struct target_ops debug_target;
85
86 static void debug_to_open (char *, int);
87
88 static void debug_to_prepare_to_store (struct target_ops *self,
89 struct regcache *);
90
91 static void debug_to_files_info (struct target_ops *);
92
93 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
94 struct bp_target_info *);
95
96 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
97 struct bp_target_info *);
98
99 static int debug_to_can_use_hw_breakpoint (int, int, int);
100
101 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
105 struct bp_target_info *);
106
107 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
108 struct expression *);
109
110 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
111 struct expression *);
112
113 static int debug_to_stopped_by_watchpoint (void);
114
115 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
116
117 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
118 CORE_ADDR, CORE_ADDR, int);
119
120 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
121
122 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
123 struct expression *);
124
125 static void debug_to_terminal_init (void);
126
127 static void debug_to_terminal_inferior (void);
128
129 static void debug_to_terminal_ours_for_output (void);
130
131 static void debug_to_terminal_save_ours (void);
132
133 static void debug_to_terminal_ours (void);
134
135 static void debug_to_load (char *, int);
136
137 static int debug_to_can_run (void);
138
139 static void debug_to_stop (ptid_t);
140
141 /* Pointer to array of target architecture structures; the size of the
142 array; the current index into the array; the allocated size of the
143 array. */
144 struct target_ops **target_structs;
145 unsigned target_struct_size;
146 unsigned target_struct_allocsize;
147 #define DEFAULT_ALLOCSIZE 10
148
149 /* The initial current target, so that there is always a semi-valid
150 current target. */
151
152 static struct target_ops dummy_target;
153
154 /* Top of target stack. */
155
156 static struct target_ops *target_stack;
157
158 /* The target structure we are currently using to talk to a process
159 or file or whatever "inferior" we have. */
160
161 struct target_ops current_target;
162
163 /* Command list for target. */
164
165 static struct cmd_list_element *targetlist = NULL;
166
167 /* Nonzero if we should trust readonly sections from the
168 executable when reading memory. */
169
170 static int trust_readonly = 0;
171
172 /* Nonzero if we should show true memory content including
173 memory breakpoints inserted by GDB. */
174
175 static int show_memory_breakpoints = 0;
176
177 /* These globals control whether GDB attempts to perform these
178 operations; they are useful for targets that need to prevent
179 inadvertent disruption, such as in non-stop mode. */
180
181 int may_write_registers = 1;
182
183 int may_write_memory = 1;
184
185 int may_insert_breakpoints = 1;
186
187 int may_insert_tracepoints = 1;
188
189 int may_insert_fast_tracepoints = 1;
190
191 int may_stop = 1;
192
193 /* Non-zero if we want to see a trace of target-level operations. */
194
195 static unsigned int targetdebug = 0;
196 static void
197 show_targetdebug (struct ui_file *file, int from_tty,
198 struct cmd_list_element *c, const char *value)
199 {
200 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
201 }
202
203 static void setup_target_debug (void);
204
205 /* The user just typed 'target' without the name of a target. */
206
207 static void
208 target_command (char *arg, int from_tty)
209 {
210 fputs_filtered ("Argument required (target name). Try `help target'\n",
211 gdb_stdout);
212 }
213
214 /* Default target_has_* methods for process_stratum targets. */
215
216 int
217 default_child_has_all_memory (struct target_ops *ops)
218 {
219 /* If no inferior selected, then we can't read memory here. */
220 if (ptid_equal (inferior_ptid, null_ptid))
221 return 0;
222
223 return 1;
224 }
225
226 int
227 default_child_has_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_stack (struct target_ops *ops)
238 {
239 /* If no inferior selected, there's no stack. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_registers (struct target_ops *ops)
248 {
249 /* Can't read registers from no inferior. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
258 {
259 /* If there's no thread selected, then we can't make it run through
260 hoops. */
261 if (ptid_equal (the_ptid, null_ptid))
262 return 0;
263
264 return 1;
265 }
266
267
268 int
269 target_has_all_memory_1 (void)
270 {
271 struct target_ops *t;
272
273 for (t = current_target.beneath; t != NULL; t = t->beneath)
274 if (t->to_has_all_memory (t))
275 return 1;
276
277 return 0;
278 }
279
280 int
281 target_has_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_stack_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_stack (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_registers_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_registers (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_execution_1 (ptid_t the_ptid)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_execution (t, the_ptid))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_current (void)
330 {
331 return target_has_execution_1 (inferior_ptid);
332 }
333
334 /* Complete initialization of T. This ensures that various fields in
335 T are set, if needed by the target implementation. */
336
337 void
338 complete_target_initialization (struct target_ops *t)
339 {
340 /* Provide default values for all "must have" methods. */
341 if (t->to_xfer_partial == NULL)
342 t->to_xfer_partial = default_xfer_partial;
343
344 if (t->to_has_all_memory == NULL)
345 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
346
347 if (t->to_has_memory == NULL)
348 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
349
350 if (t->to_has_stack == NULL)
351 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
352
353 if (t->to_has_registers == NULL)
354 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
355
356 if (t->to_has_execution == NULL)
357 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
358 }
359
360 /* Add possible target architecture T to the list and add a new
361 command 'target T->to_shortname'. Set COMPLETER as the command's
362 completer if not NULL. */
363
364 void
365 add_target_with_completer (struct target_ops *t,
366 completer_ftype *completer)
367 {
368 struct cmd_list_element *c;
369
370 complete_target_initialization (t);
371
372 if (!target_structs)
373 {
374 target_struct_allocsize = DEFAULT_ALLOCSIZE;
375 target_structs = (struct target_ops **) xmalloc
376 (target_struct_allocsize * sizeof (*target_structs));
377 }
378 if (target_struct_size >= target_struct_allocsize)
379 {
380 target_struct_allocsize *= 2;
381 target_structs = (struct target_ops **)
382 xrealloc ((char *) target_structs,
383 target_struct_allocsize * sizeof (*target_structs));
384 }
385 target_structs[target_struct_size++] = t;
386
387 if (targetlist == NULL)
388 add_prefix_cmd ("target", class_run, target_command, _("\
389 Connect to a target machine or process.\n\
390 The first argument is the type or protocol of the target machine.\n\
391 Remaining arguments are interpreted by the target protocol. For more\n\
392 information on the arguments for a particular protocol, type\n\
393 `help target ' followed by the protocol name."),
394 &targetlist, "target ", 0, &cmdlist);
395 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
396 &targetlist);
397 if (completer != NULL)
398 set_cmd_completer (c, completer);
399 }
400
401 /* Add a possible target architecture to the list. */
402
403 void
404 add_target (struct target_ops *t)
405 {
406 add_target_with_completer (t, NULL);
407 }
408
409 /* See target.h. */
410
411 void
412 add_deprecated_target_alias (struct target_ops *t, char *alias)
413 {
414 struct cmd_list_element *c;
415 char *alt;
416
417 /* If we used add_alias_cmd here, we would not get the deprecated warning;
418 see PR cli/15104. */
419 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
420 alt = xstrprintf ("target %s", t->to_shortname);
421 deprecate_cmd (c, alt);
422 }
423
424 /* Stub functions */
425
426 void
427 target_ignore (void)
428 {
429 }
430
431 void
432 target_kill (void)
433 {
434 struct target_ops *t;
435
436 for (t = current_target.beneath; t != NULL; t = t->beneath)
437 if (t->to_kill != NULL)
438 {
439 if (targetdebug)
440 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
441
442 t->to_kill (t);
443 return;
444 }
445
446 noprocess ();
447 }
448
449 void
450 target_load (char *arg, int from_tty)
451 {
452 target_dcache_invalidate ();
453 (*current_target.to_load) (arg, from_tty);
454 }
455
456 void
457 target_create_inferior (char *exec_file, char *args,
458 char **env, int from_tty)
459 {
460 struct target_ops *t;
461
462 for (t = current_target.beneath; t != NULL; t = t->beneath)
463 {
464 if (t->to_create_inferior != NULL)
465 {
466 t->to_create_inferior (t, exec_file, args, env, from_tty);
467 if (targetdebug)
468 fprintf_unfiltered (gdb_stdlog,
469 "target_create_inferior (%s, %s, xxx, %d)\n",
470 exec_file, args, from_tty);
471 return;
472 }
473 }
474
475 internal_error (__FILE__, __LINE__,
476 _("could not find a target to create inferior"));
477 }
478
479 void
480 target_terminal_inferior (void)
481 {
482 /* A background resume (``run&'') should leave GDB in control of the
483 terminal. Use target_can_async_p, not target_is_async_p, since at
484 this point the target is not async yet. However, if sync_execution
485 is not set, we know it will become async prior to resume. */
486 if (target_can_async_p () && !sync_execution)
487 return;
488
489 /* If GDB is resuming the inferior in the foreground, install
490 inferior's terminal modes. */
491 (*current_target.to_terminal_inferior) ();
492 }
493
494 static int
495 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
496 struct target_ops *t)
497 {
498 errno = EIO; /* Can't read/write this location. */
499 return 0; /* No bytes handled. */
500 }
501
502 static void
503 tcomplain (void)
504 {
505 error (_("You can't do that when your target is `%s'"),
506 current_target.to_shortname);
507 }
508
509 void
510 noprocess (void)
511 {
512 error (_("You can't do that without a process to debug."));
513 }
514
515 static void
516 default_terminal_info (const char *args, int from_tty)
517 {
518 printf_unfiltered (_("No saved terminal information.\n"));
519 }
520
521 /* A default implementation for the to_get_ada_task_ptid target method.
522
523 This function builds the PTID by using both LWP and TID as part of
524 the PTID lwp and tid elements. The pid used is the pid of the
525 inferior_ptid. */
526
527 static ptid_t
528 default_get_ada_task_ptid (long lwp, long tid)
529 {
530 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
531 }
532
533 static enum exec_direction_kind
534 default_execution_direction (void)
535 {
536 if (!target_can_execute_reverse)
537 return EXEC_FORWARD;
538 else if (!target_can_async_p ())
539 return EXEC_FORWARD;
540 else
541 gdb_assert_not_reached ("\
542 to_execution_direction must be implemented for reverse async");
543 }
544
545 /* Go through the target stack from top to bottom, copying over zero
546 entries in current_target, then filling in still empty entries. In
547 effect, we are doing class inheritance through the pushed target
548 vectors.
549
550 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
551 is currently implemented, is that it discards any knowledge of
552 which target an inherited method originally belonged to.
553 Consequently, new target methods should instead explicitly and
554 locally search the target stack for the target that can handle the
555 request. */
556
557 static void
558 update_current_target (void)
559 {
560 struct target_ops *t;
561
562 /* First, reset current's contents. */
563 memset (&current_target, 0, sizeof (current_target));
564
565 #define INHERIT(FIELD, TARGET) \
566 if (!current_target.FIELD) \
567 current_target.FIELD = (TARGET)->FIELD
568
569 for (t = target_stack; t; t = t->beneath)
570 {
571 INHERIT (to_shortname, t);
572 INHERIT (to_longname, t);
573 INHERIT (to_doc, t);
574 /* Do not inherit to_open. */
575 /* Do not inherit to_close. */
576 /* Do not inherit to_attach. */
577 INHERIT (to_post_attach, t);
578 INHERIT (to_attach_no_wait, t);
579 /* Do not inherit to_detach. */
580 /* Do not inherit to_disconnect. */
581 /* Do not inherit to_resume. */
582 /* Do not inherit to_wait. */
583 /* Do not inherit to_fetch_registers. */
584 /* Do not inherit to_store_registers. */
585 INHERIT (to_prepare_to_store, t);
586 INHERIT (deprecated_xfer_memory, t);
587 INHERIT (to_files_info, t);
588 /* Do not inherit to_insert_breakpoint. */
589 /* Do not inherit to_remove_breakpoint. */
590 INHERIT (to_can_use_hw_breakpoint, t);
591 INHERIT (to_insert_hw_breakpoint, t);
592 INHERIT (to_remove_hw_breakpoint, t);
593 /* Do not inherit to_ranged_break_num_registers. */
594 INHERIT (to_insert_watchpoint, t);
595 INHERIT (to_remove_watchpoint, t);
596 /* Do not inherit to_insert_mask_watchpoint. */
597 /* Do not inherit to_remove_mask_watchpoint. */
598 INHERIT (to_stopped_data_address, t);
599 INHERIT (to_have_steppable_watchpoint, t);
600 INHERIT (to_have_continuable_watchpoint, t);
601 INHERIT (to_stopped_by_watchpoint, t);
602 INHERIT (to_watchpoint_addr_within_range, t);
603 INHERIT (to_region_ok_for_hw_watchpoint, t);
604 INHERIT (to_can_accel_watchpoint_condition, t);
605 /* Do not inherit to_masked_watch_num_registers. */
606 INHERIT (to_terminal_init, t);
607 INHERIT (to_terminal_inferior, t);
608 INHERIT (to_terminal_ours_for_output, t);
609 INHERIT (to_terminal_ours, t);
610 INHERIT (to_terminal_save_ours, t);
611 INHERIT (to_terminal_info, t);
612 /* Do not inherit to_kill. */
613 INHERIT (to_load, t);
614 /* Do not inherit to_create_inferior. */
615 INHERIT (to_post_startup_inferior, t);
616 INHERIT (to_insert_fork_catchpoint, t);
617 INHERIT (to_remove_fork_catchpoint, t);
618 INHERIT (to_insert_vfork_catchpoint, t);
619 INHERIT (to_remove_vfork_catchpoint, t);
620 /* Do not inherit to_follow_fork. */
621 INHERIT (to_insert_exec_catchpoint, t);
622 INHERIT (to_remove_exec_catchpoint, t);
623 INHERIT (to_set_syscall_catchpoint, t);
624 INHERIT (to_has_exited, t);
625 /* Do not inherit to_mourn_inferior. */
626 INHERIT (to_can_run, t);
627 /* Do not inherit to_pass_signals. */
628 /* Do not inherit to_program_signals. */
629 /* Do not inherit to_thread_alive. */
630 /* Do not inherit to_find_new_threads. */
631 /* Do not inherit to_pid_to_str. */
632 INHERIT (to_extra_thread_info, t);
633 INHERIT (to_thread_name, t);
634 INHERIT (to_stop, t);
635 /* Do not inherit to_xfer_partial. */
636 INHERIT (to_rcmd, t);
637 INHERIT (to_pid_to_exec_file, t);
638 INHERIT (to_log_command, t);
639 INHERIT (to_stratum, t);
640 /* Do not inherit to_has_all_memory. */
641 /* Do not inherit to_has_memory. */
642 /* Do not inherit to_has_stack. */
643 /* Do not inherit to_has_registers. */
644 /* Do not inherit to_has_execution. */
645 INHERIT (to_has_thread_control, t);
646 INHERIT (to_can_async_p, t);
647 INHERIT (to_is_async_p, t);
648 INHERIT (to_async, t);
649 INHERIT (to_find_memory_regions, t);
650 INHERIT (to_make_corefile_notes, t);
651 INHERIT (to_get_bookmark, t);
652 INHERIT (to_goto_bookmark, t);
653 /* Do not inherit to_get_thread_local_address. */
654 INHERIT (to_can_execute_reverse, t);
655 INHERIT (to_execution_direction, t);
656 INHERIT (to_thread_architecture, t);
657 /* Do not inherit to_read_description. */
658 INHERIT (to_get_ada_task_ptid, t);
659 /* Do not inherit to_search_memory. */
660 INHERIT (to_supports_multi_process, t);
661 INHERIT (to_supports_enable_disable_tracepoint, t);
662 INHERIT (to_supports_string_tracing, t);
663 INHERIT (to_trace_init, t);
664 INHERIT (to_download_tracepoint, t);
665 INHERIT (to_can_download_tracepoint, t);
666 INHERIT (to_download_trace_state_variable, t);
667 INHERIT (to_enable_tracepoint, t);
668 INHERIT (to_disable_tracepoint, t);
669 INHERIT (to_trace_set_readonly_regions, t);
670 INHERIT (to_trace_start, t);
671 INHERIT (to_get_trace_status, t);
672 INHERIT (to_get_tracepoint_status, t);
673 INHERIT (to_trace_stop, t);
674 INHERIT (to_trace_find, t);
675 INHERIT (to_get_trace_state_variable_value, t);
676 INHERIT (to_save_trace_data, t);
677 INHERIT (to_upload_tracepoints, t);
678 INHERIT (to_upload_trace_state_variables, t);
679 INHERIT (to_get_raw_trace_data, t);
680 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
681 INHERIT (to_set_disconnected_tracing, t);
682 INHERIT (to_set_circular_trace_buffer, t);
683 INHERIT (to_set_trace_buffer_size, t);
684 INHERIT (to_set_trace_notes, t);
685 INHERIT (to_get_tib_address, t);
686 INHERIT (to_set_permissions, t);
687 INHERIT (to_static_tracepoint_marker_at, t);
688 INHERIT (to_static_tracepoint_markers_by_strid, t);
689 INHERIT (to_traceframe_info, t);
690 INHERIT (to_use_agent, t);
691 INHERIT (to_can_use_agent, t);
692 INHERIT (to_augmented_libraries_svr4_read, t);
693 INHERIT (to_magic, t);
694 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
695 INHERIT (to_can_run_breakpoint_commands, t);
696 /* Do not inherit to_memory_map. */
697 /* Do not inherit to_flash_erase. */
698 /* Do not inherit to_flash_done. */
699 }
700 #undef INHERIT
701
702 /* Clean up a target struct so it no longer has any zero pointers in
703 it. Some entries are defaulted to a method that prints an error,
704 others are hard-wired to a standard recursive default. */
705
706 #define de_fault(field, value) \
707 if (!current_target.field) \
708 current_target.field = value
709
710 de_fault (to_open,
711 (void (*) (char *, int))
712 tcomplain);
713 de_fault (to_close,
714 (void (*) (void))
715 target_ignore);
716 de_fault (to_post_attach,
717 (void (*) (int))
718 target_ignore);
719 de_fault (to_prepare_to_store,
720 (void (*) (struct target_ops *, struct regcache *))
721 noprocess);
722 de_fault (deprecated_xfer_memory,
723 (int (*) (CORE_ADDR, gdb_byte *, int, int,
724 struct mem_attrib *, struct target_ops *))
725 nomemory);
726 de_fault (to_files_info,
727 (void (*) (struct target_ops *))
728 target_ignore);
729 de_fault (to_can_use_hw_breakpoint,
730 (int (*) (int, int, int))
731 return_zero);
732 de_fault (to_insert_hw_breakpoint,
733 (int (*) (struct gdbarch *, struct bp_target_info *))
734 return_minus_one);
735 de_fault (to_remove_hw_breakpoint,
736 (int (*) (struct gdbarch *, struct bp_target_info *))
737 return_minus_one);
738 de_fault (to_insert_watchpoint,
739 (int (*) (CORE_ADDR, int, int, struct expression *))
740 return_minus_one);
741 de_fault (to_remove_watchpoint,
742 (int (*) (CORE_ADDR, int, int, struct expression *))
743 return_minus_one);
744 de_fault (to_stopped_by_watchpoint,
745 (int (*) (void))
746 return_zero);
747 de_fault (to_stopped_data_address,
748 (int (*) (struct target_ops *, CORE_ADDR *))
749 return_zero);
750 de_fault (to_watchpoint_addr_within_range,
751 default_watchpoint_addr_within_range);
752 de_fault (to_region_ok_for_hw_watchpoint,
753 default_region_ok_for_hw_watchpoint);
754 de_fault (to_can_accel_watchpoint_condition,
755 (int (*) (CORE_ADDR, int, int, struct expression *))
756 return_zero);
757 de_fault (to_terminal_init,
758 (void (*) (void))
759 target_ignore);
760 de_fault (to_terminal_inferior,
761 (void (*) (void))
762 target_ignore);
763 de_fault (to_terminal_ours_for_output,
764 (void (*) (void))
765 target_ignore);
766 de_fault (to_terminal_ours,
767 (void (*) (void))
768 target_ignore);
769 de_fault (to_terminal_save_ours,
770 (void (*) (void))
771 target_ignore);
772 de_fault (to_terminal_info,
773 default_terminal_info);
774 de_fault (to_load,
775 (void (*) (char *, int))
776 tcomplain);
777 de_fault (to_post_startup_inferior,
778 (void (*) (ptid_t))
779 target_ignore);
780 de_fault (to_insert_fork_catchpoint,
781 (int (*) (int))
782 return_one);
783 de_fault (to_remove_fork_catchpoint,
784 (int (*) (int))
785 return_one);
786 de_fault (to_insert_vfork_catchpoint,
787 (int (*) (int))
788 return_one);
789 de_fault (to_remove_vfork_catchpoint,
790 (int (*) (int))
791 return_one);
792 de_fault (to_insert_exec_catchpoint,
793 (int (*) (int))
794 return_one);
795 de_fault (to_remove_exec_catchpoint,
796 (int (*) (int))
797 return_one);
798 de_fault (to_set_syscall_catchpoint,
799 (int (*) (int, int, int, int, int *))
800 return_one);
801 de_fault (to_has_exited,
802 (int (*) (int, int, int *))
803 return_zero);
804 de_fault (to_can_run,
805 return_zero);
806 de_fault (to_extra_thread_info,
807 (char *(*) (struct thread_info *))
808 return_null);
809 de_fault (to_thread_name,
810 (char *(*) (struct thread_info *))
811 return_null);
812 de_fault (to_stop,
813 (void (*) (ptid_t))
814 target_ignore);
815 current_target.to_xfer_partial = current_xfer_partial;
816 de_fault (to_rcmd,
817 (void (*) (char *, struct ui_file *))
818 tcomplain);
819 de_fault (to_pid_to_exec_file,
820 (char *(*) (int))
821 return_null);
822 de_fault (to_async,
823 (void (*) (void (*) (enum inferior_event_type, void*), void*))
824 tcomplain);
825 de_fault (to_thread_architecture,
826 default_thread_architecture);
827 current_target.to_read_description = NULL;
828 de_fault (to_get_ada_task_ptid,
829 (ptid_t (*) (long, long))
830 default_get_ada_task_ptid);
831 de_fault (to_supports_multi_process,
832 (int (*) (void))
833 return_zero);
834 de_fault (to_supports_enable_disable_tracepoint,
835 (int (*) (void))
836 return_zero);
837 de_fault (to_supports_string_tracing,
838 (int (*) (void))
839 return_zero);
840 de_fault (to_trace_init,
841 (void (*) (void))
842 tcomplain);
843 de_fault (to_download_tracepoint,
844 (void (*) (struct bp_location *))
845 tcomplain);
846 de_fault (to_can_download_tracepoint,
847 (int (*) (void))
848 return_zero);
849 de_fault (to_download_trace_state_variable,
850 (void (*) (struct trace_state_variable *))
851 tcomplain);
852 de_fault (to_enable_tracepoint,
853 (void (*) (struct bp_location *))
854 tcomplain);
855 de_fault (to_disable_tracepoint,
856 (void (*) (struct bp_location *))
857 tcomplain);
858 de_fault (to_trace_set_readonly_regions,
859 (void (*) (void))
860 tcomplain);
861 de_fault (to_trace_start,
862 (void (*) (void))
863 tcomplain);
864 de_fault (to_get_trace_status,
865 (int (*) (struct trace_status *))
866 return_minus_one);
867 de_fault (to_get_tracepoint_status,
868 (void (*) (struct breakpoint *, struct uploaded_tp *))
869 tcomplain);
870 de_fault (to_trace_stop,
871 (void (*) (void))
872 tcomplain);
873 de_fault (to_trace_find,
874 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
875 return_minus_one);
876 de_fault (to_get_trace_state_variable_value,
877 (int (*) (int, LONGEST *))
878 return_zero);
879 de_fault (to_save_trace_data,
880 (int (*) (const char *))
881 tcomplain);
882 de_fault (to_upload_tracepoints,
883 (int (*) (struct uploaded_tp **))
884 return_zero);
885 de_fault (to_upload_trace_state_variables,
886 (int (*) (struct uploaded_tsv **))
887 return_zero);
888 de_fault (to_get_raw_trace_data,
889 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
890 tcomplain);
891 de_fault (to_get_min_fast_tracepoint_insn_len,
892 (int (*) (void))
893 return_minus_one);
894 de_fault (to_set_disconnected_tracing,
895 (void (*) (int))
896 target_ignore);
897 de_fault (to_set_circular_trace_buffer,
898 (void (*) (int))
899 target_ignore);
900 de_fault (to_set_trace_buffer_size,
901 (void (*) (LONGEST))
902 target_ignore);
903 de_fault (to_set_trace_notes,
904 (int (*) (const char *, const char *, const char *))
905 return_zero);
906 de_fault (to_get_tib_address,
907 (int (*) (ptid_t, CORE_ADDR *))
908 tcomplain);
909 de_fault (to_set_permissions,
910 (void (*) (void))
911 target_ignore);
912 de_fault (to_static_tracepoint_marker_at,
913 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
914 return_zero);
915 de_fault (to_static_tracepoint_markers_by_strid,
916 (VEC(static_tracepoint_marker_p) * (*) (const char *))
917 tcomplain);
918 de_fault (to_traceframe_info,
919 (struct traceframe_info * (*) (void))
920 return_null);
921 de_fault (to_supports_evaluation_of_breakpoint_conditions,
922 (int (*) (void))
923 return_zero);
924 de_fault (to_can_run_breakpoint_commands,
925 (int (*) (void))
926 return_zero);
927 de_fault (to_use_agent,
928 (int (*) (int))
929 tcomplain);
930 de_fault (to_can_use_agent,
931 (int (*) (void))
932 return_zero);
933 de_fault (to_augmented_libraries_svr4_read,
934 (int (*) (void))
935 return_zero);
936 de_fault (to_execution_direction, default_execution_direction);
937
938 #undef de_fault
939
940 /* Finally, position the target-stack beneath the squashed
941 "current_target". That way code looking for a non-inherited
942 target method can quickly and simply find it. */
943 current_target.beneath = target_stack;
944
945 if (targetdebug)
946 setup_target_debug ();
947 }
948
949 /* Push a new target type into the stack of the existing target accessors,
950 possibly superseding some of the existing accessors.
951
952 Rather than allow an empty stack, we always have the dummy target at
953 the bottom stratum, so we can call the function vectors without
954 checking them. */
955
956 void
957 push_target (struct target_ops *t)
958 {
959 struct target_ops **cur;
960
961 /* Check magic number. If wrong, it probably means someone changed
962 the struct definition, but not all the places that initialize one. */
963 if (t->to_magic != OPS_MAGIC)
964 {
965 fprintf_unfiltered (gdb_stderr,
966 "Magic number of %s target struct wrong\n",
967 t->to_shortname);
968 internal_error (__FILE__, __LINE__,
969 _("failed internal consistency check"));
970 }
971
972 /* Find the proper stratum to install this target in. */
973 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
974 {
975 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
976 break;
977 }
978
979 /* If there are already targets at this stratum, remove them. */
980 /* FIXME: cagney/2003-10-15: I think this should be popping all
981 targets to CUR, and not just those at this stratum level. */
982 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
983 {
984 /* There's already something at this stratum level. Close it,
985 and un-hook it from the stack. */
986 struct target_ops *tmp = (*cur);
987
988 (*cur) = (*cur)->beneath;
989 tmp->beneath = NULL;
990 target_close (tmp);
991 }
992
993 /* We have removed all targets in our stratum, now add the new one. */
994 t->beneath = (*cur);
995 (*cur) = t;
996
997 update_current_target ();
998 }
999
1000 /* Remove a target_ops vector from the stack, wherever it may be.
1001 Return how many times it was removed (0 or 1). */
1002
1003 int
1004 unpush_target (struct target_ops *t)
1005 {
1006 struct target_ops **cur;
1007 struct target_ops *tmp;
1008
1009 if (t->to_stratum == dummy_stratum)
1010 internal_error (__FILE__, __LINE__,
1011 _("Attempt to unpush the dummy target"));
1012
1013 /* Look for the specified target. Note that we assume that a target
1014 can only occur once in the target stack. */
1015
1016 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1017 {
1018 if ((*cur) == t)
1019 break;
1020 }
1021
1022 /* If we don't find target_ops, quit. Only open targets should be
1023 closed. */
1024 if ((*cur) == NULL)
1025 return 0;
1026
1027 /* Unchain the target. */
1028 tmp = (*cur);
1029 (*cur) = (*cur)->beneath;
1030 tmp->beneath = NULL;
1031
1032 update_current_target ();
1033
1034 /* Finally close the target. Note we do this after unchaining, so
1035 any target method calls from within the target_close
1036 implementation don't end up in T anymore. */
1037 target_close (t);
1038
1039 return 1;
1040 }
1041
1042 void
1043 pop_all_targets_above (enum strata above_stratum)
1044 {
1045 while ((int) (current_target.to_stratum) > (int) above_stratum)
1046 {
1047 if (!unpush_target (target_stack))
1048 {
1049 fprintf_unfiltered (gdb_stderr,
1050 "pop_all_targets couldn't find target %s\n",
1051 target_stack->to_shortname);
1052 internal_error (__FILE__, __LINE__,
1053 _("failed internal consistency check"));
1054 break;
1055 }
1056 }
1057 }
1058
1059 void
1060 pop_all_targets (void)
1061 {
1062 pop_all_targets_above (dummy_stratum);
1063 }
1064
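/* Illustrative sketch, not part of the original target.c: the usual
   counterpart of the push done in a backend's open routine.  When the
   connection goes away, the backend removes itself so the target
   beneath becomes current again.  "example_ops" is the hypothetical
   vector from the earlier sketch.  */

static void
example_connection_closed (void)
{
  unpush_target (&example_ops);
}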
1065 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1066
1067 int
1068 target_is_pushed (struct target_ops *t)
1069 {
1070 struct target_ops **cur;
1071
1072 /* Check magic number. If wrong, it probably means someone changed
1073 the struct definition, but not all the places that initialize one. */
1074 if (t->to_magic != OPS_MAGIC)
1075 {
1076 fprintf_unfiltered (gdb_stderr,
1077 "Magic number of %s target struct wrong\n",
1078 t->to_shortname);
1079 internal_error (__FILE__, __LINE__,
1080 _("failed internal consistency check"));
1081 }
1082
1083 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1084 if (*cur == t)
1085 return 1;
1086
1087 return 0;
1088 }
1089
1090 /* Using the objfile specified in OBJFILE, find the address for the
1091 current thread's thread-local storage with offset OFFSET. */
1092 CORE_ADDR
1093 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1094 {
1095 volatile CORE_ADDR addr = 0;
1096 struct target_ops *target;
1097
1098 for (target = current_target.beneath;
1099 target != NULL;
1100 target = target->beneath)
1101 {
1102 if (target->to_get_thread_local_address != NULL)
1103 break;
1104 }
1105
1106 if (target != NULL
1107 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1108 {
1109 ptid_t ptid = inferior_ptid;
1110 volatile struct gdb_exception ex;
1111
1112 TRY_CATCH (ex, RETURN_MASK_ALL)
1113 {
1114 CORE_ADDR lm_addr;
1115
1116 /* Fetch the load module address for this objfile. */
1117 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1118 objfile);
1119 /* If it's 0, throw the appropriate exception. */
1120 if (lm_addr == 0)
1121 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1122 _("TLS load module not found"));
1123
1124 addr = target->to_get_thread_local_address (target, ptid,
1125 lm_addr, offset);
1126 }
1127 /* If an error occurred, print TLS related messages here. Otherwise,
1128 throw the error to some higher catcher. */
1129 if (ex.reason < 0)
1130 {
1131 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1132
1133 switch (ex.error)
1134 {
1135 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1136 error (_("Cannot find thread-local variables "
1137 "in this thread library."));
1138 break;
1139 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1140 if (objfile_is_library)
1141 error (_("Cannot find shared library `%s' in dynamic"
1142 " linker's load module list"), objfile_name (objfile));
1143 else
1144 error (_("Cannot find executable file `%s' in dynamic"
1145 " linker's load module list"), objfile_name (objfile));
1146 break;
1147 case TLS_NOT_ALLOCATED_YET_ERROR:
1148 if (objfile_is_library)
1149 error (_("The inferior has not yet allocated storage for"
1150 " thread-local variables in\n"
1151 "the shared library `%s'\n"
1152 "for %s"),
1153 objfile_name (objfile), target_pid_to_str (ptid));
1154 else
1155 error (_("The inferior has not yet allocated storage for"
1156 " thread-local variables in\n"
1157 "the executable `%s'\n"
1158 "for %s"),
1159 objfile_name (objfile), target_pid_to_str (ptid));
1160 break;
1161 case TLS_GENERIC_ERROR:
1162 if (objfile_is_library)
1163 error (_("Cannot find thread-local storage for %s, "
1164 "shared library %s:\n%s"),
1165 target_pid_to_str (ptid),
1166 objfile_name (objfile), ex.message);
1167 else
1168 error (_("Cannot find thread-local storage for %s, "
1169 "executable file %s:\n%s"),
1170 target_pid_to_str (ptid),
1171 objfile_name (objfile), ex.message);
1172 break;
1173 default:
1174 throw_exception (ex);
1175 break;
1176 }
1177 }
1178 }
1179 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1180 TLS is an ABI-specific thing. But we don't do that yet. */
1181 else
1182 error (_("Cannot find thread-local variables on this target"));
1183
1184 return addr;
1185 }
1186
1187 const char *
1188 target_xfer_error_to_string (enum target_xfer_error err)
1189 {
1190 #define CASE(X) case X: return #X
1191 switch (err)
1192 {
1193 CASE(TARGET_XFER_E_IO);
1194 CASE(TARGET_XFER_E_UNAVAILABLE);
1195 default:
1196 return "<unknown>";
1197 }
1198 #undef CASE
1199 }
1200
1201
1202 #undef MIN
1203 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1204
1205 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1206 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1207 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1208 is responsible for freeing it. Return the number of bytes successfully
1209 read. */
1210
1211 int
1212 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1213 {
1214 int tlen, offset, i;
1215 gdb_byte buf[4];
1216 int errcode = 0;
1217 char *buffer;
1218 int buffer_allocated;
1219 char *bufptr;
1220 unsigned int nbytes_read = 0;
1221
1222 gdb_assert (string);
1223
1224 /* Small for testing. */
1225 buffer_allocated = 4;
1226 buffer = xmalloc (buffer_allocated);
1227 bufptr = buffer;
1228
1229 while (len > 0)
1230 {
1231 tlen = MIN (len, 4 - (memaddr & 3));
1232 offset = memaddr & 3;
1233
1234 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1235 if (errcode != 0)
1236 {
1237 /* The transfer request might have crossed the boundary to an
1238 unallocated region of memory. Retry the transfer, requesting
1239 a single byte. */
1240 tlen = 1;
1241 offset = 0;
1242 errcode = target_read_memory (memaddr, buf, 1);
1243 if (errcode != 0)
1244 goto done;
1245 }
1246
1247 if (bufptr - buffer + tlen > buffer_allocated)
1248 {
1249 unsigned int bytes;
1250
1251 bytes = bufptr - buffer;
1252 buffer_allocated *= 2;
1253 buffer = xrealloc (buffer, buffer_allocated);
1254 bufptr = buffer + bytes;
1255 }
1256
1257 for (i = 0; i < tlen; i++)
1258 {
1259 *bufptr++ = buf[i + offset];
1260 if (buf[i + offset] == '\000')
1261 {
1262 nbytes_read += i + 1;
1263 goto done;
1264 }
1265 }
1266
1267 memaddr += tlen;
1268 len -= tlen;
1269 nbytes_read += tlen;
1270 }
1271 done:
1272 *string = buffer;
1273 if (errnop != NULL)
1274 *errnop = errcode;
1275 return nbytes_read;
1276 }
1277
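/* Illustrative sketch, not part of the original target.c: a typical
   caller of target_read_string.  The address and the 200-byte limit
   are hypothetical; the caller owns the malloc'd buffer.  */

static void
example_print_target_string (CORE_ADDR memaddr)
{
  char *str;
  int errcode, nbytes;

  nbytes = target_read_string (memaddr, &str, 200, &errcode);
  if (errcode == 0 && nbytes > 0 && str[nbytes - 1] == '\0')
    printf_filtered (_("%d bytes: \"%s\"\n"), nbytes, str);
  /* The buffer is set even on error, so always free it.  */
  xfree (str);
}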
1278 struct target_section_table *
1279 target_get_section_table (struct target_ops *target)
1280 {
1281 struct target_ops *t;
1282
1283 if (targetdebug)
1284 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1285
1286 for (t = target; t != NULL; t = t->beneath)
1287 if (t->to_get_section_table != NULL)
1288 return (*t->to_get_section_table) (t);
1289
1290 return NULL;
1291 }
1292
1293 /* Find a section containing ADDR. */
1294
1295 struct target_section *
1296 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1297 {
1298 struct target_section_table *table = target_get_section_table (target);
1299 struct target_section *secp;
1300
1301 if (table == NULL)
1302 return NULL;
1303
1304 for (secp = table->sections; secp < table->sections_end; secp++)
1305 {
1306 if (addr >= secp->addr && addr < secp->endaddr)
1307 return secp;
1308 }
1309 return NULL;
1310 }
1311
1312 /* Read memory from the live target, even if currently inspecting a
1313 traceframe. The return is the same as that of target_read. */
1314
1315 static LONGEST
1316 target_read_live_memory (enum target_object object,
1317 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len)
1318 {
1319 LONGEST ret;
1320 struct cleanup *cleanup;
1321
1322 /* Switch momentarily out of tfind mode so as to access live memory.
1323 Note that this must not clear global state, such as the frame
1324 cache, which must still remain valid for the previous traceframe.
1325 We may be _building_ the frame cache at this point. */
1326 cleanup = make_cleanup_restore_traceframe_number ();
1327 set_traceframe_number (-1);
1328
1329 ret = target_read (current_target.beneath, object, NULL,
1330 myaddr, memaddr, len);
1331
1332 do_cleanups (cleanup);
1333 return ret;
1334 }
1335
1336 /* Using the set of read-only target sections of OPS, read live
1337 read-only memory. Note that the actual reads start from the
1338 top-most target again.
1339
1340 For interface/parameters/return description see target.h,
1341 to_xfer_partial. */
1342
1343 static LONGEST
1344 memory_xfer_live_readonly_partial (struct target_ops *ops,
1345 enum target_object object,
1346 gdb_byte *readbuf, ULONGEST memaddr,
1347 ULONGEST len)
1348 {
1349 struct target_section *secp;
1350 struct target_section_table *table;
1351
1352 secp = target_section_by_addr (ops, memaddr);
1353 if (secp != NULL
1354 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1355 secp->the_bfd_section)
1356 & SEC_READONLY))
1357 {
1358 struct target_section *p;
1359 ULONGEST memend = memaddr + len;
1360
1361 table = target_get_section_table (ops);
1362
1363 for (p = table->sections; p < table->sections_end; p++)
1364 {
1365 if (memaddr >= p->addr)
1366 {
1367 if (memend <= p->endaddr)
1368 {
1369 /* Entire transfer is within this section. */
1370 return target_read_live_memory (object, memaddr,
1371 readbuf, len);
1372 }
1373 else if (memaddr >= p->endaddr)
1374 {
1375 /* This section ends before the transfer starts. */
1376 continue;
1377 }
1378 else
1379 {
1380 /* This section overlaps the transfer. Just do half. */
1381 len = p->endaddr - memaddr;
1382 return target_read_live_memory (object, memaddr,
1383 readbuf, len);
1384 }
1385 }
1386 }
1387 }
1388
1389 return 0;
1390 }
1391
1392 /* Read memory from more than one valid target. A core file, for
1393 instance, could have some of the memory but delegate other bits to
1394 the target below it. So, we must manually try all targets. */
1395
1396 static LONGEST
1397 raw_memory_xfer_partial (struct target_ops *ops, void *readbuf,
1398 const void *writebuf, ULONGEST memaddr, LONGEST len)
1399 {
1400 LONGEST res;
1401
1402 do
1403 {
1404 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1405 readbuf, writebuf, memaddr, len);
1406 if (res > 0)
1407 break;
1408
1409 /* Stop if the target reports that the memory is not available. */
1410 if (res == TARGET_XFER_E_UNAVAILABLE)
1411 break;
1412
1413 /* We want to continue past core files to executables, but not
1414 past a running target's memory. */
1415 if (ops->to_has_all_memory (ops))
1416 break;
1417
1418 ops = ops->beneath;
1419 }
1420 while (ops != NULL);
1421
1422 return res;
1423 }
1424
1425 /* Perform a partial memory transfer.
1426 For docs see target.h, to_xfer_partial. */
1427
1428 static LONGEST
1429 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1430 void *readbuf, const void *writebuf, ULONGEST memaddr,
1431 ULONGEST len)
1432 {
1433 LONGEST res;
1434 int reg_len;
1435 struct mem_region *region;
1436 struct inferior *inf;
1437
1438 /* For accesses to unmapped overlay sections, read directly from
1439 files. Must do this first, as MEMADDR may need adjustment. */
1440 if (readbuf != NULL && overlay_debugging)
1441 {
1442 struct obj_section *section = find_pc_overlay (memaddr);
1443
1444 if (pc_in_unmapped_range (memaddr, section))
1445 {
1446 struct target_section_table *table
1447 = target_get_section_table (ops);
1448 const char *section_name = section->the_bfd_section->name;
1449
1450 memaddr = overlay_mapped_address (memaddr, section);
1451 return section_table_xfer_memory_partial (readbuf, writebuf,
1452 memaddr, len,
1453 table->sections,
1454 table->sections_end,
1455 section_name);
1456 }
1457 }
1458
1459 /* Try the executable files, if "trust-readonly-sections" is set. */
1460 if (readbuf != NULL && trust_readonly)
1461 {
1462 struct target_section *secp;
1463 struct target_section_table *table;
1464
1465 secp = target_section_by_addr (ops, memaddr);
1466 if (secp != NULL
1467 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1468 secp->the_bfd_section)
1469 & SEC_READONLY))
1470 {
1471 table = target_get_section_table (ops);
1472 return section_table_xfer_memory_partial (readbuf, writebuf,
1473 memaddr, len,
1474 table->sections,
1475 table->sections_end,
1476 NULL);
1477 }
1478 }
1479
1480 /* If reading unavailable memory in the context of traceframes, and
1481 this address falls within a read-only section, fall back to
1482 reading from live memory. */
1483 if (readbuf != NULL && get_traceframe_number () != -1)
1484 {
1485 VEC(mem_range_s) *available;
1486
1487 /* If we fail to get the set of available memory, then the
1488 target does not support querying traceframe info, and so we
1489 attempt reading from the traceframe anyway (assuming the
1490 target implements the old QTro packet then). */
1491 if (traceframe_available_memory (&available, memaddr, len))
1492 {
1493 struct cleanup *old_chain;
1494
1495 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1496
1497 if (VEC_empty (mem_range_s, available)
1498 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1499 {
1500 /* Don't read into the traceframe's available
1501 memory. */
1502 if (!VEC_empty (mem_range_s, available))
1503 {
1504 LONGEST oldlen = len;
1505
1506 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1507 gdb_assert (len <= oldlen);
1508 }
1509
1510 do_cleanups (old_chain);
1511
1512 /* This goes through the topmost target again. */
1513 res = memory_xfer_live_readonly_partial (ops, object,
1514 readbuf, memaddr, len);
1515 if (res > 0)
1516 return res;
1517
1518 /* No use trying further, we know some memory starting
1519 at MEMADDR isn't available. */
1520 return TARGET_XFER_E_UNAVAILABLE;
1521 }
1522
1523 /* Don't try to read more than how much is available, in
1524 case the target implements the deprecated QTro packet to
1525 cater for older GDBs (the target's knowledge of read-only
1526 sections may be outdated by now). */
1527 len = VEC_index (mem_range_s, available, 0)->length;
1528
1529 do_cleanups (old_chain);
1530 }
1531 }
1532
1533 /* Try GDB's internal data cache. */
1534 region = lookup_mem_region (memaddr);
1535 /* region->hi == 0 means there's no upper bound. */
1536 if (memaddr + len < region->hi || region->hi == 0)
1537 reg_len = len;
1538 else
1539 reg_len = region->hi - memaddr;
1540
1541 switch (region->attrib.mode)
1542 {
1543 case MEM_RO:
1544 if (writebuf != NULL)
1545 return -1;
1546 break;
1547
1548 case MEM_WO:
1549 if (readbuf != NULL)
1550 return -1;
1551 break;
1552
1553 case MEM_FLASH:
1554 /* We only support writing to flash during "load" for now. */
1555 if (writebuf != NULL)
1556 error (_("Writing to flash memory forbidden in this context"));
1557 break;
1558
1559 case MEM_NONE:
1560 return -1;
1561 }
1562
1563 if (!ptid_equal (inferior_ptid, null_ptid))
1564 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1565 else
1566 inf = NULL;
1567
1568 if (inf != NULL
1569 /* The dcache reads whole cache lines; that doesn't play well
1570 with reading from a trace buffer, because reading outside of
1571 the collected memory range fails. */
1572 && get_traceframe_number () == -1
1573 && (region->attrib.cache
1574 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1575 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1576 {
1577 DCACHE *dcache = target_dcache_get_or_init ();
1578
1579 if (readbuf != NULL)
1580 res = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1581 else
1582 /* FIXME drow/2006-08-09: If we're going to preserve const
1583 correctness dcache_xfer_memory should take readbuf and
1584 writebuf. */
1585 res = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1586 reg_len, 1);
1587 if (res <= 0)
1588 return -1;
1589 else
1590 return res;
1591 }
1592
1593 /* If none of those methods found the memory we wanted, fall back
1594 to a target partial transfer. Normally a single call to
1595 to_xfer_partial is enough; if it doesn't recognize an object
1596 it will call the to_xfer_partial of the next target down.
1597 But for memory this won't do. Memory is the only target
1598 object which can be read from more than one valid target. */
1599 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len);
1600
1601 /* If we are writing to the stack, make sure the cache gets updated no
1602 matter what; even if the write is not tagged as a stack write, we still
1603 need to update the cache. */
1604
1605 if (res > 0
1606 && inf != NULL
1607 && writebuf != NULL
1608 && target_dcache_init_p ()
1609 && !region->attrib.cache
1610 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1611 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1612 {
1613 DCACHE *dcache = target_dcache_get ();
1614
1615 dcache_update (dcache, memaddr, (void *) writebuf, res);
1616 }
1617
1618 /* If we still haven't got anything, return the last error. We
1619 give up. */
1620 return res;
1621 }
1622
1623 /* Perform a partial memory transfer. For docs see target.h,
1624 to_xfer_partial. */
1625
1626 static LONGEST
1627 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1628 void *readbuf, const void *writebuf, ULONGEST memaddr,
1629 ULONGEST len)
1630 {
1631 int res;
1632
1633 /* Zero length requests are ok and require no work. */
1634 if (len == 0)
1635 return 0;
1636
1637 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1638 breakpoint insns, thus hiding out from higher layers whether
1639 there are software breakpoints inserted in the code stream. */
1640 if (readbuf != NULL)
1641 {
1642 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1643
1644 if (res > 0 && !show_memory_breakpoints)
1645 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1646 }
1647 else
1648 {
1649 void *buf;
1650 struct cleanup *old_chain;
1651
1652 /* A large write request is likely to be partially satisfied
1653 by memory_xfer_partial_1. We will continually malloc
1654 and free a copy of the entire write request for breakpoint
1655 shadow handling even though we only end up writing a small
1656 subset of it. Cap writes to 4KB to mitigate this. */
1657 len = min (4096, len);
1658
1659 buf = xmalloc (len);
1660 old_chain = make_cleanup (xfree, buf);
1661 memcpy (buf, writebuf, len);
1662
1663 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1664 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1665
1666 do_cleanups (old_chain);
1667 }
1668
1669 return res;
1670 }
1671
1672 static void
1673 restore_show_memory_breakpoints (void *arg)
1674 {
1675 show_memory_breakpoints = (uintptr_t) arg;
1676 }
1677
1678 struct cleanup *
1679 make_show_memory_breakpoints_cleanup (int show)
1680 {
1681 int current = show_memory_breakpoints;
1682
1683 show_memory_breakpoints = show;
1684 return make_cleanup (restore_show_memory_breakpoints,
1685 (void *) (uintptr_t) current);
1686 }
1687
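/* Illustrative sketch, not part of the original target.c: using
   make_show_memory_breakpoints_cleanup to read memory with any
   inserted breakpoint instructions visible, restoring the previous
   setting afterwards.  */

static int
example_read_raw_view (CORE_ADDR memaddr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int status = target_read_memory (memaddr, buf, len);

  do_cleanups (old_chain);
  return status;
}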
1688 /* For docs see target.h, to_xfer_partial. */
1689
1690 LONGEST
1691 target_xfer_partial (struct target_ops *ops,
1692 enum target_object object, const char *annex,
1693 gdb_byte *readbuf, const gdb_byte *writebuf,
1694 ULONGEST offset, ULONGEST len)
1695 {
1696 LONGEST retval;
1697
1698 gdb_assert (ops->to_xfer_partial != NULL);
1699
1700 if (writebuf && !may_write_memory)
1701 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1702 core_addr_to_string_nz (offset), plongest (len));
1703
1704 /* If this is a memory transfer, let the memory-specific code
1705 have a look at it instead. Memory transfers are more
1706 complicated. */
1707 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1708 || object == TARGET_OBJECT_CODE_MEMORY)
1709 retval = memory_xfer_partial (ops, object, readbuf,
1710 writebuf, offset, len);
1711 else if (object == TARGET_OBJECT_RAW_MEMORY)
1712 {
1713 /* Request the normal memory object from other layers. */
1714 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1715 }
1716 else
1717 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1718 writebuf, offset, len);
1719
1720 if (targetdebug)
1721 {
1722 const unsigned char *myaddr = NULL;
1723
1724 fprintf_unfiltered (gdb_stdlog,
1725 "%s:target_xfer_partial "
1726 "(%d, %s, %s, %s, %s, %s) = %s",
1727 ops->to_shortname,
1728 (int) object,
1729 (annex ? annex : "(null)"),
1730 host_address_to_string (readbuf),
1731 host_address_to_string (writebuf),
1732 core_addr_to_string_nz (offset),
1733 pulongest (len), plongest (retval));
1734
1735 if (readbuf)
1736 myaddr = readbuf;
1737 if (writebuf)
1738 myaddr = writebuf;
1739 if (retval > 0 && myaddr != NULL)
1740 {
1741 int i;
1742
1743 fputs_unfiltered (", bytes =", gdb_stdlog);
1744 for (i = 0; i < retval; i++)
1745 {
1746 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1747 {
1748 if (targetdebug < 2 && i > 0)
1749 {
1750 fprintf_unfiltered (gdb_stdlog, " ...");
1751 break;
1752 }
1753 fprintf_unfiltered (gdb_stdlog, "\n");
1754 }
1755
1756 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1757 }
1758 }
1759
1760 fputc_unfiltered ('\n', gdb_stdlog);
1761 }
1762 return retval;
1763 }
1764
1765 /* Read LEN bytes of target memory at address MEMADDR, placing the
1766 results in GDB's memory at MYADDR. Returns either 0 for success or
1767 a target_xfer_error value if any error occurs.
1768
1769 If an error occurs, no guarantee is made about the contents of the data at
1770 MYADDR. In particular, the caller should not depend upon partial reads
1771 filling the buffer with good data. There is no way for the caller to know
1772 how much good data might have been transferred anyway. Callers that can
1773 deal with partial reads should call target_read (which will retry until
1774 it makes no progress, and then return how much was transferred). */
1775
1776 int
1777 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1778 {
1779 /* Dispatch to the topmost target, not the flattened current_target.
1780 Memory accesses check target->to_has_(all_)memory, and the
1781 flattened target doesn't inherit those. */
1782 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1783 myaddr, memaddr, len) == len)
1784 return 0;
1785 else
1786 return TARGET_XFER_E_IO;
1787 }
1788
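/* Illustrative sketch, not part of the original target.c: an
   all-or-nothing read through target_read_memory.  The 4-byte length
   is hypothetical.  */

static int
example_read_word (CORE_ADDR memaddr, gdb_byte *buf)
{
  if (target_read_memory (memaddr, buf, 4) != 0)
    {
      /* On failure the buffer contents are unspecified, so report the
         problem rather than using partial data.  */
      warning (_("cannot read memory at %s"),
               paddress (target_gdbarch (), memaddr));
      return -1;
    }
  return 0;
}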
1789 /* Like target_read_memory, but specify explicitly that this is a read
1790 from the target's raw memory. That is, this read bypasses the
1791 dcache, breakpoint shadowing, etc. */
1792
1793 int
1794 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1795 {
1796 /* See comment in target_read_memory about why the request starts at
1797 current_target.beneath. */
1798 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1799 myaddr, memaddr, len) == len)
1800 return 0;
1801 else
1802 return TARGET_XFER_E_IO;
1803 }
1804
1805 /* Like target_read_memory, but specify explicitly that this is a read from
1806 the target's stack. This may trigger different cache behavior. */
1807
1808 int
1809 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1810 {
1811 /* See comment in target_read_memory about why the request starts at
1812 current_target.beneath. */
1813 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1814 myaddr, memaddr, len) == len)
1815 return 0;
1816 else
1817 return TARGET_XFER_E_IO;
1818 }
1819
1820 /* Like target_read_memory, but specify explicitly that this is a read from
1821 the target's code. This may trigger different cache behavior. */
1822
1823 int
1824 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1825 {
1826 /* See comment in target_read_memory about why the request starts at
1827 current_target.beneath. */
1828 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1829 myaddr, memaddr, len) == len)
1830 return 0;
1831 else
1832 return TARGET_XFER_E_IO;
1833 }
1834
1835 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1836 Returns either 0 for success or a target_xfer_error value if any
1837 error occurs. If an error occurs, no guarantee is made about how
1838 much data got written. Callers that can deal with partial writes
1839 should call target_write. */
1840
1841 int
1842 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1843 {
1844 /* See comment in target_read_memory about why the request starts at
1845 current_target.beneath. */
1846 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1847 myaddr, memaddr, len) == len)
1848 return 0;
1849 else
1850 return TARGET_XFER_E_IO;
1851 }
1852
1853 /* Write LEN bytes from MYADDR to target raw memory at address
1854 MEMADDR. Returns either 0 for success or a target_xfer_error value
1855 if any error occurs. If an error occurs, no guarantee is made
1856 about how much data got written. Callers that can deal with
1857 partial writes should call target_write. */
1858
1859 int
1860 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1861 {
1862 /* See comment in target_read_memory about why the request starts at
1863 current_target.beneath. */
1864 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1865 myaddr, memaddr, len) == len)
1866 return 0;
1867 else
1868 return TARGET_XFER_E_IO;
1869 }
1870
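/* The write side follows the same pattern; a sketch with a hypothetical
   helper.  target_write_raw_memory would be used instead when the
   dcache and breakpoint shadowing must be bypassed, e.g. when poking
   breakpoint instructions into the inferior.  */
#if 0
static int
example_poke_bytes (CORE_ADDR addr, const gdb_byte *bytes, ssize_t len)
{
  return target_write_memory (addr, bytes, len);
}
#endif
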
1871 /* Fetch the target's memory map. */
1872
1873 VEC(mem_region_s) *
1874 target_memory_map (void)
1875 {
1876 VEC(mem_region_s) *result;
1877 struct mem_region *last_one, *this_one;
1878 int ix;
1879 struct target_ops *t;
1880
1881 if (targetdebug)
1882 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1883
1884 for (t = current_target.beneath; t != NULL; t = t->beneath)
1885 if (t->to_memory_map != NULL)
1886 break;
1887
1888 if (t == NULL)
1889 return NULL;
1890
1891 result = t->to_memory_map (t);
1892 if (result == NULL)
1893 return NULL;
1894
1895 qsort (VEC_address (mem_region_s, result),
1896 VEC_length (mem_region_s, result),
1897 sizeof (struct mem_region), mem_region_cmp);
1898
1899 /* Check that regions do not overlap. Simultaneously assign
1900 a numbering for the "mem" commands to use to refer to
1901 each region. */
1902 last_one = NULL;
1903 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1904 {
1905 this_one->number = ix;
1906
1907 if (last_one && last_one->hi > this_one->lo)
1908 {
1909 warning (_("Overlapping regions in memory map: ignoring"));
1910 VEC_free (mem_region_s, result);
1911 return NULL;
1912 }
1913 last_one = this_one;
1914 }
1915
1916 return result;
1917 }
1918
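/* Sketch of a target_memory_map consumer (hypothetical helper).  The
   regions come back sorted and numbered, so VEC_iterate is enough; the
   caller owns the vector and must free it.  */
#if 0
static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_unfiltered ("region %d: %s..%s\n", r->number,
                       hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}
#endif
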
1919 void
1920 target_flash_erase (ULONGEST address, LONGEST length)
1921 {
1922 struct target_ops *t;
1923
1924 for (t = current_target.beneath; t != NULL; t = t->beneath)
1925 if (t->to_flash_erase != NULL)
1926 {
1927 if (targetdebug)
1928 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1929 hex_string (address), phex (length, 0));
1930 t->to_flash_erase (t, address, length);
1931 return;
1932 }
1933
1934 tcomplain ();
1935 }
1936
1937 void
1938 target_flash_done (void)
1939 {
1940 struct target_ops *t;
1941
1942 for (t = current_target.beneath; t != NULL; t = t->beneath)
1943 if (t->to_flash_done != NULL)
1944 {
1945 if (targetdebug)
1946 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1947 t->to_flash_done (t);
1948 return;
1949 }
1950
1951 tcomplain ();
1952 }
1953
1954 static void
1955 show_trust_readonly (struct ui_file *file, int from_tty,
1956 struct cmd_list_element *c, const char *value)
1957 {
1958 fprintf_filtered (file,
1959 _("Mode for reading from readonly sections is %s.\n"),
1960 value);
1961 }
1962
1963 /* More generic transfers. */
1964
1965 static LONGEST
1966 default_xfer_partial (struct target_ops *ops, enum target_object object,
1967 const char *annex, gdb_byte *readbuf,
1968 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
1969 {
1970 if (object == TARGET_OBJECT_MEMORY
1971 && ops->deprecated_xfer_memory != NULL)
1972 /* If available, fall back to the target's
1973 "deprecated_xfer_memory" method. */
1974 {
1975 int xfered = -1;
1976
1977 errno = 0;
1978 if (writebuf != NULL)
1979 {
1980 void *buffer = xmalloc (len);
1981 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1982
1983 memcpy (buffer, writebuf, len);
1984 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1985 1/*write*/, NULL, ops);
1986 do_cleanups (cleanup);
1987 }
1988 if (readbuf != NULL)
1989 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1990 0/*read*/, NULL, ops);
1991 if (xfered > 0)
1992 return xfered;
1993 else if (xfered == 0 && errno == 0)
1994 /* "deprecated_xfer_memory" uses 0, cross checked against
1995 ERRNO as one indication of an error. */
1996 return 0;
1997 else
1998 return -1;
1999 }
2000 else if (ops->beneath != NULL)
2001 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2002 readbuf, writebuf, offset, len);
2003 else
2004 return -1;
2005 }
2006
2007 /* The xfer_partial handler for the topmost target. Unlike the default,
2008 it does not need to handle memory specially; it just passes all
2009 requests down the stack. */
2010
2011 static LONGEST
2012 current_xfer_partial (struct target_ops *ops, enum target_object object,
2013 const char *annex, gdb_byte *readbuf,
2014 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
2015 {
2016 if (ops->beneath != NULL)
2017 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2018 readbuf, writebuf, offset, len);
2019 else
2020 return -1;
2021 }
2022
2023 /* Target vector read/write partial wrapper functions. */
2024
2025 static LONGEST
2026 target_read_partial (struct target_ops *ops,
2027 enum target_object object,
2028 const char *annex, gdb_byte *buf,
2029 ULONGEST offset, LONGEST len)
2030 {
2031 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2032 }
2033
2034 static LONGEST
2035 target_write_partial (struct target_ops *ops,
2036 enum target_object object,
2037 const char *annex, const gdb_byte *buf,
2038 ULONGEST offset, LONGEST len)
2039 {
2040 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2041 }
2042
2043 /* Wrappers to perform the full transfer. */
2044
2045 /* For docs on target_read see target.h. */
2046
2047 LONGEST
2048 target_read (struct target_ops *ops,
2049 enum target_object object,
2050 const char *annex, gdb_byte *buf,
2051 ULONGEST offset, LONGEST len)
2052 {
2053 LONGEST xfered = 0;
2054
2055 while (xfered < len)
2056 {
2057 LONGEST xfer = target_read_partial (ops, object, annex,
2058 (gdb_byte *) buf + xfered,
2059 offset + xfered, len - xfered);
2060
2061 /* Call an observer, notifying them of the xfer progress? */
2062 if (xfer == 0)
2063 return xfered;
2064 if (xfer < 0)
2065 return -1;
2066 xfered += xfer;
2067 QUIT;
2068 }
2069 return len;
2070 }
2071
2072 /* Assuming that the entire [begin, end) range of memory cannot be
2073 read, try to read whatever subrange is possible to read.
2074
2075 The function returns, in RESULT, either zero or one memory block.
2076 If there's a readable subrange at the beginning, it is completely
2077 read and returned. Any further readable subrange will not be read.
2078 Otherwise, if there's a readable subrange at the end, it will be
2079 completely read and returned. Any readable subranges before it
2080 (which obviously do not start at the beginning) will be ignored. In
2081 other cases -- either no readable subrange, or readable subranges that
2082 are neither at the beginning nor at the end -- nothing is returned.
2083
2084 The purpose of this function is to handle a read across a boundary
2085 of accessible memory when a memory map is not available.
2086 The above restrictions are fine for this case, but will give
2087 incorrect results if the memory is 'patchy'. However, supporting
2088 'patchy' memory would require trying to read every single byte,
2089 which seems an unacceptable solution. An explicit memory map is
2090 recommended for this case -- read_memory_robust will then take
2091 care of reading multiple ranges. */
2092
2093 static void
2094 read_whatever_is_readable (struct target_ops *ops,
2095 ULONGEST begin, ULONGEST end,
2096 VEC(memory_read_result_s) **result)
2097 {
2098 gdb_byte *buf = xmalloc (end - begin);
2099 ULONGEST current_begin = begin;
2100 ULONGEST current_end = end;
2101 int forward;
2102 memory_read_result_s r;
2103
2104 /* If we previously failed to read 1 byte, nothing can be done here. */
2105 if (end - begin <= 1)
2106 {
2107 xfree (buf);
2108 return;
2109 }
2110
2111 /* Check that either the first or the last byte is readable, and give
2112 up if not. This heuristic is meant to permit reading accessible
2113 memory at the boundary of an accessible region. */
2114 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2115 buf, begin, 1) == 1)
2116 {
2117 forward = 1;
2118 ++current_begin;
2119 }
2120 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2121 buf + (end-begin) - 1, end - 1, 1) == 1)
2122 {
2123 forward = 0;
2124 --current_end;
2125 }
2126 else
2127 {
2128 xfree (buf);
2129 return;
2130 }
2131
2132 /* The loop invariant is that the range [current_begin, current_end) was
2133 previously found not to be readable as a whole.
2134
2135 Note loop condition -- if the range has 1 byte, we can't divide the range
2136 so there's no point trying further. */
2137 while (current_end - current_begin > 1)
2138 {
2139 ULONGEST first_half_begin, first_half_end;
2140 ULONGEST second_half_begin, second_half_end;
2141 LONGEST xfer;
2142 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2143
2144 if (forward)
2145 {
2146 first_half_begin = current_begin;
2147 first_half_end = middle;
2148 second_half_begin = middle;
2149 second_half_end = current_end;
2150 }
2151 else
2152 {
2153 first_half_begin = middle;
2154 first_half_end = current_end;
2155 second_half_begin = current_begin;
2156 second_half_end = middle;
2157 }
2158
2159 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2160 buf + (first_half_begin - begin),
2161 first_half_begin,
2162 first_half_end - first_half_begin);
2163
2164 if (xfer == first_half_end - first_half_begin)
2165 {
2166 /* This half reads up fine. So, the error must be in the
2167 other half. */
2168 current_begin = second_half_begin;
2169 current_end = second_half_end;
2170 }
2171 else
2172 {
2173 /* This half is not readable. Because we've tried one byte, we
2174 know some part of this half is actually readable. Go to the next
2175 iteration to divide again and try to read.
2176
2177 We don't handle the other half, because this function only tries
2178 to read a single readable subrange. */
2179 current_begin = first_half_begin;
2180 current_end = first_half_end;
2181 }
2182 }
2183
2184 if (forward)
2185 {
2186 /* The [begin, current_begin) range has been read. */
2187 r.begin = begin;
2188 r.end = current_begin;
2189 r.data = buf;
2190 }
2191 else
2192 {
2193 /* The [current_end, end) range has been read. */
2194 LONGEST rlen = end - current_end;
2195
2196 r.data = xmalloc (rlen);
2197 memcpy (r.data, buf + current_end - begin, rlen);
2198 r.begin = current_end;
2199 r.end = end;
2200 xfree (buf);
2201 }
2202 VEC_safe_push (memory_read_result_s, (*result), &r);
2203 }
2204
2205 void
2206 free_memory_read_result_vector (void *x)
2207 {
2208 VEC(memory_read_result_s) *v = x;
2209 memory_read_result_s *current;
2210 int ix;
2211
2212 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2213 {
2214 xfree (current->data);
2215 }
2216 VEC_free (memory_read_result_s, v);
2217 }
2218
2219 VEC(memory_read_result_s) *
2220 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2221 {
2222 VEC(memory_read_result_s) *result = 0;
2223
2224 LONGEST xfered = 0;
2225 while (xfered < len)
2226 {
2227 struct mem_region *region = lookup_mem_region (offset + xfered);
2228 LONGEST rlen;
2229
2230 /* If there is no explicit region, a fake one should be created. */
2231 gdb_assert (region);
2232
2233 if (region->hi == 0)
2234 rlen = len - xfered;
2235 else
2236 rlen = region->hi - (offset + xfered);
2237
2238 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2239 {
2240 /* Cannot read this region. Note that we can end up here only
2241 if the region is explicitly marked inaccessible, or
2242 'inaccessible-by-default' is in effect. */
2243 xfered += rlen;
2244 }
2245 else
2246 {
2247 LONGEST to_read = min (len - xfered, rlen);
2248 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2249
2250 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2251 (gdb_byte *) buffer,
2252 offset + xfered, to_read);
2253 /* Call an observer, notifying them of the xfer progress? */
2254 if (xfer <= 0)
2255 {
2256 /* Got an error reading full chunk. See if maybe we can read
2257 some subrange. */
2258 xfree (buffer);
2259 read_whatever_is_readable (ops, offset + xfered,
2260 offset + xfered + to_read, &result);
2261 xfered += to_read;
2262 }
2263 else
2264 {
2265 struct memory_read_result r;
2266 r.data = buffer;
2267 r.begin = offset + xfered;
2268 r.end = r.begin + xfer;
2269 VEC_safe_push (memory_read_result_s, result, &r);
2270 xfered += xfer;
2271 }
2272 QUIT;
2273 }
2274 }
2275 return result;
2276 }
2277
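/* Sketch of how a caller consumes the result (hypothetical helper):
   each element of the vector describes one readable chunk, and the
   vector together with its malloc'ed data buffers is released through
   free_memory_read_result_vector.  */
#if 0
static void
example_dump_readable (CORE_ADDR addr, LONGEST len)
{
  VEC(memory_read_result_s) *chunks
    = read_memory_robust (current_target.beneath, addr, len);
  struct cleanup *old_chain
    = make_cleanup (free_memory_read_result_vector, chunks);
  memory_read_result_s *r;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, r); ix++)
    printf_unfiltered ("readable: %s..%s\n",
                       hex_string (r->begin), hex_string (r->end));

  do_cleanups (old_chain);
}
#endif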
2278
2279 /* An alternative to target_write with progress callbacks. */
2280
2281 LONGEST
2282 target_write_with_progress (struct target_ops *ops,
2283 enum target_object object,
2284 const char *annex, const gdb_byte *buf,
2285 ULONGEST offset, LONGEST len,
2286 void (*progress) (ULONGEST, void *), void *baton)
2287 {
2288 LONGEST xfered = 0;
2289
2290 /* Give the progress callback a chance to set up. */
2291 if (progress)
2292 (*progress) (0, baton);
2293
2294 while (xfered < len)
2295 {
2296 LONGEST xfer = target_write_partial (ops, object, annex,
2297 (gdb_byte *) buf + xfered,
2298 offset + xfered, len - xfered);
2299
2300 if (xfer == 0)
2301 return xfered;
2302 if (xfer < 0)
2303 return -1;
2304
2305 if (progress)
2306 (*progress) (xfer, baton);
2307
2308 xfered += xfer;
2309 QUIT;
2310 }
2311 return len;
2312 }
2313
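/* Sketch of a progress callback for target_write_with_progress: it is
   called once with 0 before the transfer starts and then once per
   partial write with the number of bytes just transferred; BATON is
   passed through unchanged.  The callback below is hypothetical.  */
#if 0
static void
example_write_progress (ULONGEST written, void *baton)
{
  ULONGEST *total = baton;

  *total += written;
  printf_unfiltered ("wrote %s bytes so far\n", pulongest (*total));
}
#endif
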
2314 /* For docs on target_write see target.h. */
2315
2316 LONGEST
2317 target_write (struct target_ops *ops,
2318 enum target_object object,
2319 const char *annex, const gdb_byte *buf,
2320 ULONGEST offset, LONGEST len)
2321 {
2322 return target_write_with_progress (ops, object, annex, buf, offset, len,
2323 NULL, NULL);
2324 }
2325
2326 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2327 the size of the transferred data. PADDING additional bytes are
2328 available in *BUF_P. This is a helper function for
2329 target_read_alloc; see the declaration of that function for more
2330 information. */
2331
2332 static LONGEST
2333 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2334 const char *annex, gdb_byte **buf_p, int padding)
2335 {
2336 size_t buf_alloc, buf_pos;
2337 gdb_byte *buf;
2338 LONGEST n;
2339
2340 /* This function does not have a length parameter; it reads the
2341 entire OBJECT. Also, it doesn't support objects fetched partly
2342 from one target and partly from another (in a different stratum,
2343 e.g. a core file and an executable). Both reasons make it
2344 unsuitable for reading memory. */
2345 gdb_assert (object != TARGET_OBJECT_MEMORY);
2346
2347 /* Start by reading up to 4K at a time. The target will throttle
2348 this number down if necessary. */
2349 buf_alloc = 4096;
2350 buf = xmalloc (buf_alloc);
2351 buf_pos = 0;
2352 while (1)
2353 {
2354 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2355 buf_pos, buf_alloc - buf_pos - padding);
2356 if (n < 0)
2357 {
2358 /* An error occurred. */
2359 xfree (buf);
2360 return -1;
2361 }
2362 else if (n == 0)
2363 {
2364 /* Read all there was. */
2365 if (buf_pos == 0)
2366 xfree (buf);
2367 else
2368 *buf_p = buf;
2369 return buf_pos;
2370 }
2371
2372 buf_pos += n;
2373
2374 /* If the buffer is filling up, expand it. */
2375 if (buf_alloc < buf_pos * 2)
2376 {
2377 buf_alloc *= 2;
2378 buf = xrealloc (buf, buf_alloc);
2379 }
2380
2381 QUIT;
2382 }
2383 }
2384
2385 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2386 the size of the transferred data. See the declaration in "target.h"
2387 for more information about the return value. */
2388
2389 LONGEST
2390 target_read_alloc (struct target_ops *ops, enum target_object object,
2391 const char *annex, gdb_byte **buf_p)
2392 {
2393 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2394 }
2395
2396 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2397 returned as a string, allocated using xmalloc. If an error occurs
2398 or the transfer is unsupported, NULL is returned. Empty objects
2399 are returned as allocated but empty strings. A warning is issued
2400 if the result contains any embedded NUL bytes. */
2401
2402 char *
2403 target_read_stralloc (struct target_ops *ops, enum target_object object,
2404 const char *annex)
2405 {
2406 gdb_byte *buffer;
2407 char *bufstr;
2408 LONGEST i, transferred;
2409
2410 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2411 bufstr = (char *) buffer;
2412
2413 if (transferred < 0)
2414 return NULL;
2415
2416 if (transferred == 0)
2417 return xstrdup ("");
2418
2419 bufstr[transferred] = 0;
2420
2421 /* Check for embedded NUL bytes; but allow trailing NULs. */
2422 for (i = strlen (bufstr); i < transferred; i++)
2423 if (bufstr[i] != 0)
2424 {
2425 warning (_("target object %d, annex %s, "
2426 "contained unexpected null characters"),
2427 (int) object, annex ? annex : "(none)");
2428 break;
2429 }
2430
2431 return bufstr;
2432 }
2433
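/* Sketch of a target_read_stralloc caller (hypothetical helper; the
   "processes" annex is just an example of an OS data type a target
   might provide).  The returned string is xmalloc'ed and must be
   freed; NULL means the transfer failed or is unsupported.  */
#if 0
static void
example_show_osdata (void)
{
  char *text = target_read_stralloc (current_target.beneath,
                                     TARGET_OBJECT_OSDATA, "processes");

  if (text != NULL)
    {
      printf_unfiltered ("%s", text);
      xfree (text);
    }
}
#endif
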
2434 /* Memory transfer methods. */
2435
2436 void
2437 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2438 LONGEST len)
2439 {
2440 /* This method is used to read from an alternate, non-current
2441 target. This read must bypass the overlay support (as symbols
2442 don't match this target), and GDB's internal cache (wrong cache
2443 for this target). */
2444 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2445 != len)
2446 memory_error (TARGET_XFER_E_IO, addr);
2447 }
2448
2449 ULONGEST
2450 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2451 int len, enum bfd_endian byte_order)
2452 {
2453 gdb_byte buf[sizeof (ULONGEST)];
2454
2455 gdb_assert (len <= sizeof (buf));
2456 get_target_memory (ops, addr, buf, len);
2457 return extract_unsigned_integer (buf, len, byte_order);
2458 }
2459
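/* Sketch showing get_target_memory_unsigned reading a pointer-sized
   value in the target's byte order (hypothetical helper).  */
#if 0
static ULONGEST
example_read_pointer (struct target_ops *ops, CORE_ADDR addr)
{
  int ptr_len = gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT;

  return get_target_memory_unsigned (ops, addr, ptr_len,
                                     gdbarch_byte_order (target_gdbarch ()));
}
#endif
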
2460 /* See target.h. */
2461
2462 int
2463 forward_target_insert_breakpoint (struct target_ops *ops,
2464 struct gdbarch *gdbarch,
2465 struct bp_target_info *bp_tgt)
2466 {
2467 for (; ops != NULL; ops = ops->beneath)
2468 if (ops->to_insert_breakpoint != NULL)
2469 return ops->to_insert_breakpoint (ops, gdbarch, bp_tgt);
2470
2471 return memory_insert_breakpoint (ops, gdbarch, bp_tgt);
2472 }
2473
2474 /* See target.h. */
2475
2476 int
2477 target_insert_breakpoint (struct gdbarch *gdbarch,
2478 struct bp_target_info *bp_tgt)
2479 {
2480 if (!may_insert_breakpoints)
2481 {
2482 warning (_("May not insert breakpoints"));
2483 return 1;
2484 }
2485
2486 return forward_target_insert_breakpoint (&current_target, gdbarch, bp_tgt);
2487 }
2488
2489 /* See target.h. */
2490
2491 int
2492 forward_target_remove_breakpoint (struct target_ops *ops,
2493 struct gdbarch *gdbarch,
2494 struct bp_target_info *bp_tgt)
2495 {
2496 /* This is kind of a weird case to handle, but the permission might
2497 have been changed after breakpoints were inserted - in which case
2498 we should just take the user literally and assume that any
2499 breakpoints should be left in place. */
2500 if (!may_insert_breakpoints)
2501 {
2502 warning (_("May not remove breakpoints"));
2503 return 1;
2504 }
2505
2506 for (; ops != NULL; ops = ops->beneath)
2507 if (ops->to_remove_breakpoint != NULL)
2508 return ops->to_remove_breakpoint (ops, gdbarch, bp_tgt);
2509
2510 return memory_remove_breakpoint (ops, gdbarch, bp_tgt);
2511 }
2512
2513 /* See target.h. */
2514
2515 int
2516 target_remove_breakpoint (struct gdbarch *gdbarch,
2517 struct bp_target_info *bp_tgt)
2518 {
2519 return forward_target_remove_breakpoint (&current_target, gdbarch, bp_tgt);
2520 }
2521
2522 static void
2523 target_info (char *args, int from_tty)
2524 {
2525 struct target_ops *t;
2526 int has_all_mem = 0;
2527
2528 if (symfile_objfile != NULL)
2529 printf_unfiltered (_("Symbols from \"%s\".\n"),
2530 objfile_name (symfile_objfile));
2531
2532 for (t = target_stack; t != NULL; t = t->beneath)
2533 {
2534 if (!(*t->to_has_memory) (t))
2535 continue;
2536
2537 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2538 continue;
2539 if (has_all_mem)
2540 printf_unfiltered (_("\tWhile running this, "
2541 "GDB does not access memory from...\n"));
2542 printf_unfiltered ("%s:\n", t->to_longname);
2543 (t->to_files_info) (t);
2544 has_all_mem = (*t->to_has_all_memory) (t);
2545 }
2546 }
2547
2548 /* This function is called before any new inferior is created, e.g.
2549 by running a program, attaching, or connecting to a target.
2550 It cleans up any state from previous invocations which might
2551 change between runs. This is a subset of what target_preopen
2552 resets (things which might change between targets). */
2553
2554 void
2555 target_pre_inferior (int from_tty)
2556 {
2557 /* Clear out solib state. Otherwise the solib state of the previous
2558 inferior might have survived and is entirely wrong for the new
2559 target. This has been observed on GNU/Linux using glibc 2.3. How
2560 to reproduce:
2561
2562 bash$ ./foo&
2563 [1] 4711
2564 bash$ ./foo&
2565 [1] 4712
2566 bash$ gdb ./foo
2567 [...]
2568 (gdb) attach 4711
2569 (gdb) detach
2570 (gdb) attach 4712
2571 Cannot access memory at address 0xdeadbeef
2572 */
2573
2574 /* In some OSs, the shared library list is the same/global/shared
2575 across inferiors. If code is shared between processes, so are
2576 memory regions and features. */
2577 if (!gdbarch_has_global_solist (target_gdbarch ()))
2578 {
2579 no_shared_libraries (NULL, from_tty);
2580
2581 invalidate_target_mem_regions ();
2582
2583 target_clear_description ();
2584 }
2585
2586 agent_capability_invalidate ();
2587 }
2588
2589 /* Callback for iterate_over_inferiors. Gets rid of the given
2590 inferior. */
2591
2592 static int
2593 dispose_inferior (struct inferior *inf, void *args)
2594 {
2595 struct thread_info *thread;
2596
2597 thread = any_thread_of_process (inf->pid);
2598 if (thread)
2599 {
2600 switch_to_thread (thread->ptid);
2601
2602 /* Core inferiors actually should be detached, not killed. */
2603 if (target_has_execution)
2604 target_kill ();
2605 else
2606 target_detach (NULL, 0);
2607 }
2608
2609 return 0;
2610 }
2611
2612 /* This is to be called by the open routine before it does
2613 anything. */
2614
2615 void
2616 target_preopen (int from_tty)
2617 {
2618 dont_repeat ();
2619
2620 if (have_inferiors ())
2621 {
2622 if (!from_tty
2623 || !have_live_inferiors ()
2624 || query (_("A program is being debugged already. Kill it? ")))
2625 iterate_over_inferiors (dispose_inferior, NULL);
2626 else
2627 error (_("Program not killed."));
2628 }
2629
2630 /* Calling target_kill may remove the target from the stack. But if
2631 it doesn't (which seems like a win for UDI), remove it now. */
2632 /* Leave the exec target, though. The user may be switching from a
2633 live process to a core of the same program. */
2634 pop_all_targets_above (file_stratum);
2635
2636 target_pre_inferior (from_tty);
2637 }
2638
2639 /* Detach a target after doing deferred register stores. */
2640
2641 void
2642 target_detach (const char *args, int from_tty)
2643 {
2644 struct target_ops* t;
2645
2646 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2647 /* Don't remove global breakpoints here. They're removed on
2648 disconnection from the target. */
2649 ;
2650 else
2651 /* If we're in breakpoints-always-inserted mode, have to remove
2652 them before detaching. */
2653 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2654
2655 prepare_for_detach ();
2656
2657 for (t = current_target.beneath; t != NULL; t = t->beneath)
2658 {
2659 if (t->to_detach != NULL)
2660 {
2661 t->to_detach (t, args, from_tty);
2662 if (targetdebug)
2663 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2664 args, from_tty);
2665 return;
2666 }
2667 }
2668
2669 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2670 }
2671
2672 void
2673 target_disconnect (char *args, int from_tty)
2674 {
2675 struct target_ops *t;
2676
2677 /* If we're in breakpoints-always-inserted mode or if breakpoints
2678 are global across processes, we have to remove them before
2679 disconnecting. */
2680 remove_breakpoints ();
2681
2682 for (t = current_target.beneath; t != NULL; t = t->beneath)
2683 if (t->to_disconnect != NULL)
2684 {
2685 if (targetdebug)
2686 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2687 args, from_tty);
2688 t->to_disconnect (t, args, from_tty);
2689 return;
2690 }
2691
2692 tcomplain ();
2693 }
2694
2695 ptid_t
2696 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2697 {
2698 struct target_ops *t;
2699
2700 for (t = current_target.beneath; t != NULL; t = t->beneath)
2701 {
2702 if (t->to_wait != NULL)
2703 {
2704 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2705
2706 if (targetdebug)
2707 {
2708 char *status_string;
2709 char *options_string;
2710
2711 status_string = target_waitstatus_to_string (status);
2712 options_string = target_options_to_string (options);
2713 fprintf_unfiltered (gdb_stdlog,
2714 "target_wait (%d, status, options={%s})"
2715 " = %d, %s\n",
2716 ptid_get_pid (ptid), options_string,
2717 ptid_get_pid (retval), status_string);
2718 xfree (status_string);
2719 xfree (options_string);
2720 }
2721
2722 return retval;
2723 }
2724 }
2725
2726 noprocess ();
2727 }
2728
2729 char *
2730 target_pid_to_str (ptid_t ptid)
2731 {
2732 struct target_ops *t;
2733
2734 for (t = current_target.beneath; t != NULL; t = t->beneath)
2735 {
2736 if (t->to_pid_to_str != NULL)
2737 return (*t->to_pid_to_str) (t, ptid);
2738 }
2739
2740 return normal_pid_to_str (ptid);
2741 }
2742
2743 char *
2744 target_thread_name (struct thread_info *info)
2745 {
2746 struct target_ops *t;
2747
2748 for (t = current_target.beneath; t != NULL; t = t->beneath)
2749 {
2750 if (t->to_thread_name != NULL)
2751 return (*t->to_thread_name) (info);
2752 }
2753
2754 return NULL;
2755 }
2756
2757 void
2758 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2759 {
2760 struct target_ops *t;
2761
2762 target_dcache_invalidate ();
2763
2764 for (t = current_target.beneath; t != NULL; t = t->beneath)
2765 {
2766 if (t->to_resume != NULL)
2767 {
2768 t->to_resume (t, ptid, step, signal);
2769 if (targetdebug)
2770 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2771 ptid_get_pid (ptid),
2772 step ? "step" : "continue",
2773 gdb_signal_to_name (signal));
2774
2775 registers_changed_ptid (ptid);
2776 set_executing (ptid, 1);
2777 set_running (ptid, 1);
2778 clear_inline_frame_state (ptid);
2779 return;
2780 }
2781 }
2782
2783 noprocess ();
2784 }
2785
2786 void
2787 target_pass_signals (int numsigs, unsigned char *pass_signals)
2788 {
2789 struct target_ops *t;
2790
2791 for (t = current_target.beneath; t != NULL; t = t->beneath)
2792 {
2793 if (t->to_pass_signals != NULL)
2794 {
2795 if (targetdebug)
2796 {
2797 int i;
2798
2799 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2800 numsigs);
2801
2802 for (i = 0; i < numsigs; i++)
2803 if (pass_signals[i])
2804 fprintf_unfiltered (gdb_stdlog, " %s",
2805 gdb_signal_to_name (i));
2806
2807 fprintf_unfiltered (gdb_stdlog, " })\n");
2808 }
2809
2810 (*t->to_pass_signals) (numsigs, pass_signals);
2811 return;
2812 }
2813 }
2814 }
2815
2816 void
2817 target_program_signals (int numsigs, unsigned char *program_signals)
2818 {
2819 struct target_ops *t;
2820
2821 for (t = current_target.beneath; t != NULL; t = t->beneath)
2822 {
2823 if (t->to_program_signals != NULL)
2824 {
2825 if (targetdebug)
2826 {
2827 int i;
2828
2829 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2830 numsigs);
2831
2832 for (i = 0; i < numsigs; i++)
2833 if (program_signals[i])
2834 fprintf_unfiltered (gdb_stdlog, " %s",
2835 gdb_signal_to_name (i));
2836
2837 fprintf_unfiltered (gdb_stdlog, " })\n");
2838 }
2839
2840 (*t->to_program_signals) (numsigs, program_signals);
2841 return;
2842 }
2843 }
2844 }
2845
2846 /* Look through the list of possible targets for a target that can
2847 follow forks. */
2848
2849 int
2850 target_follow_fork (int follow_child, int detach_fork)
2851 {
2852 struct target_ops *t;
2853
2854 for (t = current_target.beneath; t != NULL; t = t->beneath)
2855 {
2856 if (t->to_follow_fork != NULL)
2857 {
2858 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2859
2860 if (targetdebug)
2861 fprintf_unfiltered (gdb_stdlog,
2862 "target_follow_fork (%d, %d) = %d\n",
2863 follow_child, detach_fork, retval);
2864 return retval;
2865 }
2866 }
2867
2868 /* Some target returned a fork event, but did not know how to follow it. */
2869 internal_error (__FILE__, __LINE__,
2870 _("could not find a target to follow fork"));
2871 }
2872
2873 void
2874 target_mourn_inferior (void)
2875 {
2876 struct target_ops *t;
2877
2878 for (t = current_target.beneath; t != NULL; t = t->beneath)
2879 {
2880 if (t->to_mourn_inferior != NULL)
2881 {
2882 t->to_mourn_inferior (t);
2883 if (targetdebug)
2884 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2885
2886 /* We no longer need to keep handles on any of the object files.
2887 Make sure to release them to avoid unnecessarily locking any
2888 of them while we're not actually debugging. */
2889 bfd_cache_close_all ();
2890
2891 return;
2892 }
2893 }
2894
2895 internal_error (__FILE__, __LINE__,
2896 _("could not find a target to follow mourn inferior"));
2897 }
2898
2899 /* Look for a target which can describe architectural features, starting
2900 from TARGET. If we find one, return its description. */
2901
2902 const struct target_desc *
2903 target_read_description (struct target_ops *target)
2904 {
2905 struct target_ops *t;
2906
2907 for (t = target; t != NULL; t = t->beneath)
2908 if (t->to_read_description != NULL)
2909 {
2910 const struct target_desc *tdesc;
2911
2912 tdesc = t->to_read_description (t);
2913 if (tdesc)
2914 return tdesc;
2915 }
2916
2917 return NULL;
2918 }
2919
2920 /* The default implementation of to_search_memory.
2921 This implements a basic search of memory, reading target memory and
2922 performing the search here (as opposed to performing the search on the
2923 target side with, for example, gdbserver). */
2924
2925 int
2926 simple_search_memory (struct target_ops *ops,
2927 CORE_ADDR start_addr, ULONGEST search_space_len,
2928 const gdb_byte *pattern, ULONGEST pattern_len,
2929 CORE_ADDR *found_addrp)
2930 {
2931 /* NOTE: also defined in find.c testcase. */
2932 #define SEARCH_CHUNK_SIZE 16000
2933 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2934 /* Buffer to hold memory contents for searching. */
2935 gdb_byte *search_buf;
2936 unsigned search_buf_size;
2937 struct cleanup *old_cleanups;
2938
2939 search_buf_size = chunk_size + pattern_len - 1;
2940
2941 /* No point in trying to allocate a buffer larger than the search space. */
2942 if (search_space_len < search_buf_size)
2943 search_buf_size = search_space_len;
2944
2945 search_buf = malloc (search_buf_size);
2946 if (search_buf == NULL)
2947 error (_("Unable to allocate memory to perform the search."));
2948 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2949
2950 /* Prime the search buffer. */
2951
2952 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2953 search_buf, start_addr, search_buf_size) != search_buf_size)
2954 {
2955 warning (_("Unable to access %s bytes of target "
2956 "memory at %s, halting search."),
2957 pulongest (search_buf_size), hex_string (start_addr));
2958 do_cleanups (old_cleanups);
2959 return -1;
2960 }
2961
2962 /* Perform the search.
2963
2964 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2965 When we've scanned N bytes we copy the trailing bytes to the start and
2966 read in another N bytes. */
2967
2968 while (search_space_len >= pattern_len)
2969 {
2970 gdb_byte *found_ptr;
2971 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2972
2973 found_ptr = memmem (search_buf, nr_search_bytes,
2974 pattern, pattern_len);
2975
2976 if (found_ptr != NULL)
2977 {
2978 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2979
2980 *found_addrp = found_addr;
2981 do_cleanups (old_cleanups);
2982 return 1;
2983 }
2984
2985 /* Not found in this chunk, skip to next chunk. */
2986
2987 /* Don't let search_space_len wrap here, it's unsigned. */
2988 if (search_space_len >= chunk_size)
2989 search_space_len -= chunk_size;
2990 else
2991 search_space_len = 0;
2992
2993 if (search_space_len >= pattern_len)
2994 {
2995 unsigned keep_len = search_buf_size - chunk_size;
2996 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2997 int nr_to_read;
2998
2999 /* Copy the trailing part of the previous iteration to the front
3000 of the buffer for the next iteration. */
3001 gdb_assert (keep_len == pattern_len - 1);
3002 memcpy (search_buf, search_buf + chunk_size, keep_len);
3003
3004 nr_to_read = min (search_space_len - keep_len, chunk_size);
3005
3006 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3007 search_buf + keep_len, read_addr,
3008 nr_to_read) != nr_to_read)
3009 {
3010 warning (_("Unable to access %s bytes of target "
3011 "memory at %s, halting search."),
3012 plongest (nr_to_read),
3013 hex_string (read_addr));
3014 do_cleanups (old_cleanups);
3015 return -1;
3016 }
3017
3018 start_addr += chunk_size;
3019 }
3020 }
3021
3022 /* Not found. */
3023
3024 do_cleanups (old_cleanups);
3025 return 0;
3026 }
3027
3028 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3029 sequence of bytes in PATTERN with length PATTERN_LEN.
3030
3031 The result is 1 if found, 0 if not found, and -1 if there was an error
3032 requiring halting of the search (e.g. memory read error).
3033 If the pattern is found the address is recorded in FOUND_ADDRP. */
3034
3035 int
3036 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3037 const gdb_byte *pattern, ULONGEST pattern_len,
3038 CORE_ADDR *found_addrp)
3039 {
3040 struct target_ops *t;
3041 int found;
3042
3043 /* We don't use INHERIT to set current_target.to_search_memory,
3044 so we have to scan the target stack and handle targetdebug
3045 ourselves. */
3046
3047 if (targetdebug)
3048 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3049 hex_string (start_addr));
3050
3051 for (t = current_target.beneath; t != NULL; t = t->beneath)
3052 if (t->to_search_memory != NULL)
3053 break;
3054
3055 if (t != NULL)
3056 {
3057 found = t->to_search_memory (t, start_addr, search_space_len,
3058 pattern, pattern_len, found_addrp);
3059 }
3060 else
3061 {
3062 /* If a special version of to_search_memory isn't available, use the
3063 simple version. */
3064 found = simple_search_memory (current_target.beneath,
3065 start_addr, search_space_len,
3066 pattern, pattern_len, found_addrp);
3067 }
3068
3069 if (targetdebug)
3070 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3071
3072 return found;
3073 }
3074
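/* Sketch of a target_search_memory caller (the helper, pattern and
   range are illustrative).  The return value distinguishes found (1),
   not found (0) and a read error (-1).  */
#if 0
static void
example_search (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found_addr;

  if (target_search_memory (start, space_len,
                            pattern, sizeof (pattern), &found_addr) == 1)
    printf_unfiltered ("pattern found at %s\n", hex_string (found_addr));
}
#endif
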
3075 /* Look through the currently pushed targets. If none of them will
3076 be able to restart the currently running process, issue an error
3077 message. */
3078
3079 void
3080 target_require_runnable (void)
3081 {
3082 struct target_ops *t;
3083
3084 for (t = target_stack; t != NULL; t = t->beneath)
3085 {
3086 /* If this target knows how to create a new program, then
3087 assume we will still be able to after killing the current
3088 one. Either killing and mourning will not pop T, or else
3089 find_default_run_target will find it again. */
3090 if (t->to_create_inferior != NULL)
3091 return;
3092
3093 /* Do not worry about thread_stratum targets that cannot
3094 create inferiors. Assume they will be pushed again if
3095 necessary, and continue to the process_stratum. */
3096 if (t->to_stratum == thread_stratum
3097 || t->to_stratum == arch_stratum)
3098 continue;
3099
3100 error (_("The \"%s\" target does not support \"run\". "
3101 "Try \"help target\" or \"continue\"."),
3102 t->to_shortname);
3103 }
3104
3105 /* This function is only called if the target is running. In that
3106 case there should have been a process_stratum target and it
3107 should either know how to create inferiors, or not... */
3108 internal_error (__FILE__, __LINE__, _("No targets found"));
3109 }
3110
3111 /* Look through the list of possible targets for a target that can
3112 execute a run or attach command without any other data. This is
3113 used to locate the default process stratum.
3114
3115 If DO_MESG is not NULL, the result is always valid (error() is
3116 called for errors); else, return NULL on error. */
3117
3118 static struct target_ops *
3119 find_default_run_target (char *do_mesg)
3120 {
3121 struct target_ops **t;
3122 struct target_ops *runable = NULL;
3123 int count;
3124
3125 count = 0;
3126
3127 for (t = target_structs; t < target_structs + target_struct_size;
3128 ++t)
3129 {
3130 if ((*t)->to_can_run && target_can_run (*t))
3131 {
3132 runable = *t;
3133 ++count;
3134 }
3135 }
3136
3137 if (count != 1)
3138 {
3139 if (do_mesg)
3140 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3141 else
3142 return NULL;
3143 }
3144
3145 return runable;
3146 }
3147
3148 void
3149 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3150 {
3151 struct target_ops *t;
3152
3153 t = find_default_run_target ("attach");
3154 (t->to_attach) (t, args, from_tty);
3155 return;
3156 }
3157
3158 void
3159 find_default_create_inferior (struct target_ops *ops,
3160 char *exec_file, char *allargs, char **env,
3161 int from_tty)
3162 {
3163 struct target_ops *t;
3164
3165 t = find_default_run_target ("run");
3166 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3167 return;
3168 }
3169
3170 static int
3171 find_default_can_async_p (void)
3172 {
3173 struct target_ops *t;
3174
3175 /* This may be called before the target is pushed on the stack;
3176 look for the default process stratum. If there's none, gdb isn't
3177 configured with a native debugger, and target remote isn't
3178 connected yet. */
3179 t = find_default_run_target (NULL);
3180 if (t && t->to_can_async_p)
3181 return (t->to_can_async_p) ();
3182 return 0;
3183 }
3184
3185 static int
3186 find_default_is_async_p (void)
3187 {
3188 struct target_ops *t;
3189
3190 /* This may be called before the target is pushed on the stack;
3191 look for the default process stratum. If there's none, gdb isn't
3192 configured with a native debugger, and target remote isn't
3193 connected yet. */
3194 t = find_default_run_target (NULL);
3195 if (t && t->to_is_async_p)
3196 return (t->to_is_async_p) ();
3197 return 0;
3198 }
3199
3200 static int
3201 find_default_supports_non_stop (void)
3202 {
3203 struct target_ops *t;
3204
3205 t = find_default_run_target (NULL);
3206 if (t && t->to_supports_non_stop)
3207 return (t->to_supports_non_stop) ();
3208 return 0;
3209 }
3210
3211 int
3212 target_supports_non_stop (void)
3213 {
3214 struct target_ops *t;
3215
3216 for (t = &current_target; t != NULL; t = t->beneath)
3217 if (t->to_supports_non_stop)
3218 return t->to_supports_non_stop ();
3219
3220 return 0;
3221 }
3222
3223 /* Implement the "info proc" command. */
3224
3225 int
3226 target_info_proc (char *args, enum info_proc_what what)
3227 {
3228 struct target_ops *t;
3229
3230 /* If we're already connected to something that can get us OS
3231 related data, use it. Otherwise, try using the native
3232 target. */
3233 if (current_target.to_stratum >= process_stratum)
3234 t = current_target.beneath;
3235 else
3236 t = find_default_run_target (NULL);
3237
3238 for (; t != NULL; t = t->beneath)
3239 {
3240 if (t->to_info_proc != NULL)
3241 {
3242 t->to_info_proc (t, args, what);
3243
3244 if (targetdebug)
3245 fprintf_unfiltered (gdb_stdlog,
3246 "target_info_proc (\"%s\", %d)\n", args, what);
3247
3248 return 1;
3249 }
3250 }
3251
3252 return 0;
3253 }
3254
3255 static int
3256 find_default_supports_disable_randomization (void)
3257 {
3258 struct target_ops *t;
3259
3260 t = find_default_run_target (NULL);
3261 if (t && t->to_supports_disable_randomization)
3262 return (t->to_supports_disable_randomization) ();
3263 return 0;
3264 }
3265
3266 int
3267 target_supports_disable_randomization (void)
3268 {
3269 struct target_ops *t;
3270
3271 for (t = &current_target; t != NULL; t = t->beneath)
3272 if (t->to_supports_disable_randomization)
3273 return t->to_supports_disable_randomization ();
3274
3275 return 0;
3276 }
3277
3278 char *
3279 target_get_osdata (const char *type)
3280 {
3281 struct target_ops *t;
3282
3283 /* If we're already connected to something that can get us OS
3284 related data, use it. Otherwise, try using the native
3285 target. */
3286 if (current_target.to_stratum >= process_stratum)
3287 t = current_target.beneath;
3288 else
3289 t = find_default_run_target ("get OS data");
3290
3291 if (!t)
3292 return NULL;
3293
3294 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3295 }
3296
3297 /* Determine the current address space of thread PTID. */
3298
3299 struct address_space *
3300 target_thread_address_space (ptid_t ptid)
3301 {
3302 struct address_space *aspace;
3303 struct inferior *inf;
3304 struct target_ops *t;
3305
3306 for (t = current_target.beneath; t != NULL; t = t->beneath)
3307 {
3308 if (t->to_thread_address_space != NULL)
3309 {
3310 aspace = t->to_thread_address_space (t, ptid);
3311 gdb_assert (aspace);
3312
3313 if (targetdebug)
3314 fprintf_unfiltered (gdb_stdlog,
3315 "target_thread_address_space (%s) = %d\n",
3316 target_pid_to_str (ptid),
3317 address_space_num (aspace));
3318 return aspace;
3319 }
3320 }
3321
3322 /* Fall-back to the "main" address space of the inferior. */
3323 inf = find_inferior_pid (ptid_get_pid (ptid));
3324
3325 if (inf == NULL || inf->aspace == NULL)
3326 internal_error (__FILE__, __LINE__,
3327 _("Can't determine the current "
3328 "address space of thread %s\n"),
3329 target_pid_to_str (ptid));
3330
3331 return inf->aspace;
3332 }
3333
3334
3335 /* Target file operations. */
3336
3337 static struct target_ops *
3338 default_fileio_target (void)
3339 {
3340 /* If we're already connected to something that can perform
3341 file I/O, use it. Otherwise, try using the native target. */
3342 if (current_target.to_stratum >= process_stratum)
3343 return current_target.beneath;
3344 else
3345 return find_default_run_target ("file I/O");
3346 }
3347
3348 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3349 target file descriptor, or -1 if an error occurs (and set
3350 *TARGET_ERRNO). */
3351 int
3352 target_fileio_open (const char *filename, int flags, int mode,
3353 int *target_errno)
3354 {
3355 struct target_ops *t;
3356
3357 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3358 {
3359 if (t->to_fileio_open != NULL)
3360 {
3361 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3362
3363 if (targetdebug)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3366 filename, flags, mode,
3367 fd, fd != -1 ? 0 : *target_errno);
3368 return fd;
3369 }
3370 }
3371
3372 *target_errno = FILEIO_ENOSYS;
3373 return -1;
3374 }
3375
3376 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3377 Return the number of bytes written, or -1 if an error occurs
3378 (and set *TARGET_ERRNO). */
3379 int
3380 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3381 ULONGEST offset, int *target_errno)
3382 {
3383 struct target_ops *t;
3384
3385 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3386 {
3387 if (t->to_fileio_pwrite != NULL)
3388 {
3389 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3390 target_errno);
3391
3392 if (targetdebug)
3393 fprintf_unfiltered (gdb_stdlog,
3394 "target_fileio_pwrite (%d,...,%d,%s) "
3395 "= %d (%d)\n",
3396 fd, len, pulongest (offset),
3397 ret, ret != -1 ? 0 : *target_errno);
3398 return ret;
3399 }
3400 }
3401
3402 *target_errno = FILEIO_ENOSYS;
3403 return -1;
3404 }
3405
3406 /* Read up to LEN bytes from FD on the target into READ_BUF.
3407 Return the number of bytes read, or -1 if an error occurs
3408 (and set *TARGET_ERRNO). */
3409 int
3410 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3411 ULONGEST offset, int *target_errno)
3412 {
3413 struct target_ops *t;
3414
3415 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3416 {
3417 if (t->to_fileio_pread != NULL)
3418 {
3419 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3420 target_errno);
3421
3422 if (targetdebug)
3423 fprintf_unfiltered (gdb_stdlog,
3424 "target_fileio_pread (%d,...,%d,%s) "
3425 "= %d (%d)\n",
3426 fd, len, pulongest (offset),
3427 ret, ret != -1 ? 0 : *target_errno);
3428 return ret;
3429 }
3430 }
3431
3432 *target_errno = FILEIO_ENOSYS;
3433 return -1;
3434 }
3435
3436 /* Close FD on the target. Return 0, or -1 if an error occurs
3437 (and set *TARGET_ERRNO). */
3438 int
3439 target_fileio_close (int fd, int *target_errno)
3440 {
3441 struct target_ops *t;
3442
3443 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3444 {
3445 if (t->to_fileio_close != NULL)
3446 {
3447 int ret = t->to_fileio_close (fd, target_errno);
3448
3449 if (targetdebug)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "target_fileio_close (%d) = %d (%d)\n",
3452 fd, ret, ret != -1 ? 0 : *target_errno);
3453 return ret;
3454 }
3455 }
3456
3457 *target_errno = FILEIO_ENOSYS;
3458 return -1;
3459 }
3460
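/* Sketch of the basic fileio calling sequence -- open, read in a loop,
   close -- checking *TARGET_ERRNO on failure.  The helper and chunk
   handling are illustrative only.  */
#if 0
static LONGEST
example_fileio_slurp (const char *filename, gdb_byte *buf, LONGEST size)
{
  int target_errno;
  LONGEST pos = 0;
  int fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0,
                               &target_errno);

  if (fd == -1)
    return -1;

  while (pos < size)
    {
      int chunk = size - pos > 4096 ? 4096 : (int) (size - pos);
      int n = target_fileio_pread (fd, buf + pos, chunk, pos,
                                   &target_errno);

      if (n <= 0)
        break;
      pos += n;
    }

  target_fileio_close (fd, &target_errno);
  return pos;
}
#endif
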
3461 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3462 occurs (and set *TARGET_ERRNO). */
3463 int
3464 target_fileio_unlink (const char *filename, int *target_errno)
3465 {
3466 struct target_ops *t;
3467
3468 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3469 {
3470 if (t->to_fileio_unlink != NULL)
3471 {
3472 int ret = t->to_fileio_unlink (filename, target_errno);
3473
3474 if (targetdebug)
3475 fprintf_unfiltered (gdb_stdlog,
3476 "target_fileio_unlink (%s) = %d (%d)\n",
3477 filename, ret, ret != -1 ? 0 : *target_errno);
3478 return ret;
3479 }
3480 }
3481
3482 *target_errno = FILEIO_ENOSYS;
3483 return -1;
3484 }
3485
3486 /* Read value of symbolic link FILENAME on the target. Return a
3487 null-terminated string allocated via xmalloc, or NULL if an error
3488 occurs (and set *TARGET_ERRNO). */
3489 char *
3490 target_fileio_readlink (const char *filename, int *target_errno)
3491 {
3492 struct target_ops *t;
3493
3494 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3495 {
3496 if (t->to_fileio_readlink != NULL)
3497 {
3498 char *ret = t->to_fileio_readlink (filename, target_errno);
3499
3500 if (targetdebug)
3501 fprintf_unfiltered (gdb_stdlog,
3502 "target_fileio_readlink (%s) = %s (%d)\n",
3503 filename, ret? ret : "(nil)",
3504 ret? 0 : *target_errno);
3505 return ret;
3506 }
3507 }
3508
3509 *target_errno = FILEIO_ENOSYS;
3510 return NULL;
3511 }
3512
3513 static void
3514 target_fileio_close_cleanup (void *opaque)
3515 {
3516 int fd = *(int *) opaque;
3517 int target_errno;
3518
3519 target_fileio_close (fd, &target_errno);
3520 }
3521
3522 /* Read target file FILENAME. Store the result in *BUF_P and
3523 return the size of the transferred data. PADDING additional bytes are
3524 available in *BUF_P. This is a helper function for
3525 target_fileio_read_alloc; see the declaration of that function for more
3526 information. */
3527
3528 static LONGEST
3529 target_fileio_read_alloc_1 (const char *filename,
3530 gdb_byte **buf_p, int padding)
3531 {
3532 struct cleanup *close_cleanup;
3533 size_t buf_alloc, buf_pos;
3534 gdb_byte *buf;
3535 LONGEST n;
3536 int fd;
3537 int target_errno;
3538
3539 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3540 if (fd == -1)
3541 return -1;
3542
3543 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3544
3545 /* Start by reading up to 4K at a time. The target will throttle
3546 this number down if necessary. */
3547 buf_alloc = 4096;
3548 buf = xmalloc (buf_alloc);
3549 buf_pos = 0;
3550 while (1)
3551 {
3552 n = target_fileio_pread (fd, &buf[buf_pos],
3553 buf_alloc - buf_pos - padding, buf_pos,
3554 &target_errno);
3555 if (n < 0)
3556 {
3557 /* An error occurred. */
3558 do_cleanups (close_cleanup);
3559 xfree (buf);
3560 return -1;
3561 }
3562 else if (n == 0)
3563 {
3564 /* Read all there was. */
3565 do_cleanups (close_cleanup);
3566 if (buf_pos == 0)
3567 xfree (buf);
3568 else
3569 *buf_p = buf;
3570 return buf_pos;
3571 }
3572
3573 buf_pos += n;
3574
3575 /* If the buffer is filling up, expand it. */
3576 if (buf_alloc < buf_pos * 2)
3577 {
3578 buf_alloc *= 2;
3579 buf = xrealloc (buf, buf_alloc);
3580 }
3581
3582 QUIT;
3583 }
3584 }
3585
3586 /* Read target file FILENAME. Store the result in *BUF_P and return
3587 the size of the transferred data. See the declaration in "target.h"
3588 for more information about the return value. */
3589
3590 LONGEST
3591 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3592 {
3593 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3594 }
3595
3596 /* Read target file FILENAME. The result is NUL-terminated and
3597 returned as a string, allocated using xmalloc. If an error occurs
3598 or the transfer is unsupported, NULL is returned. Empty objects
3599 are returned as allocated but empty strings. A warning is issued
3600 if the result contains any embedded NUL bytes. */
3601
3602 char *
3603 target_fileio_read_stralloc (const char *filename)
3604 {
3605 gdb_byte *buffer;
3606 char *bufstr;
3607 LONGEST i, transferred;
3608
3609 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3610 bufstr = (char *) buffer;
3611
3612 if (transferred < 0)
3613 return NULL;
3614
3615 if (transferred == 0)
3616 return xstrdup ("");
3617
3618 bufstr[transferred] = 0;
3619
3620 /* Check for embedded NUL bytes; but allow trailing NULs. */
3621 for (i = strlen (bufstr); i < transferred; i++)
3622 if (bufstr[i] != 0)
3623 {
3624 warning (_("target file %s "
3625 "contained unexpected null characters"),
3626 filename);
3627 break;
3628 }
3629
3630 return bufstr;
3631 }
3632
3633
3634 static int
3635 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3636 {
3637 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3638 }
3639
3640 static int
3641 default_watchpoint_addr_within_range (struct target_ops *target,
3642 CORE_ADDR addr,
3643 CORE_ADDR start, int length)
3644 {
3645 return addr >= start && addr < start + length;
3646 }
3647
3648 static struct gdbarch *
3649 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3650 {
3651 return target_gdbarch ();
3652 }
3653
3654 static int
3655 return_zero (void)
3656 {
3657 return 0;
3658 }
3659
3660 static int
3661 return_one (void)
3662 {
3663 return 1;
3664 }
3665
3666 static int
3667 return_minus_one (void)
3668 {
3669 return -1;
3670 }
3671
3672 static void *
3673 return_null (void)
3674 {
3675 return 0;
3676 }
3677
3678 /*
3679 * Find the next target down the stack from the specified target.
3680 */
3681
3682 struct target_ops *
3683 find_target_beneath (struct target_ops *t)
3684 {
3685 return t->beneath;
3686 }
3687
3688 \f
3689 /* The inferior process has died. Long live the inferior! */
3690
3691 void
3692 generic_mourn_inferior (void)
3693 {
3694 ptid_t ptid;
3695
3696 ptid = inferior_ptid;
3697 inferior_ptid = null_ptid;
3698
3699 /* Mark breakpoints uninserted in case something tries to delete a
3700 breakpoint while we delete the inferior's threads (which would
3701 fail, since the inferior is long gone). */
3702 mark_breakpoints_out ();
3703
3704 if (!ptid_equal (ptid, null_ptid))
3705 {
3706 int pid = ptid_get_pid (ptid);
3707 exit_inferior (pid);
3708 }
3709
3710 /* Note this wipes step-resume breakpoints, so needs to be done
3711 after exit_inferior, which ends up referencing the step-resume
3712 breakpoints through clear_thread_inferior_resources. */
3713 breakpoint_init_inferior (inf_exited);
3714
3715 registers_changed ();
3716
3717 reopen_exec_file ();
3718 reinit_frame_cache ();
3719
3720 if (deprecated_detach_hook)
3721 deprecated_detach_hook ();
3722 }
3723 \f
3724 /* Convert a normal process ID to a string. Returns the string in a
3725 static buffer. */
3726
3727 char *
3728 normal_pid_to_str (ptid_t ptid)
3729 {
3730 static char buf[32];
3731
3732 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3733 return buf;
3734 }
3735
3736 static char *
3737 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3738 {
3739 return normal_pid_to_str (ptid);
3740 }
3741
3742 /* Error-catcher for target_find_memory_regions. */
3743 static int
3744 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3745 {
3746 error (_("Command not implemented for this target."));
3747 return 0;
3748 }
3749
3750 /* Error-catcher for target_make_corefile_notes. */
3751 static char *
3752 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3753 {
3754 error (_("Command not implemented for this target."));
3755 return NULL;
3756 }
3757
3758 /* Error-catcher for target_get_bookmark. */
3759 static gdb_byte *
3760 dummy_get_bookmark (char *ignore1, int ignore2)
3761 {
3762 tcomplain ();
3763 return NULL;
3764 }
3765
3766 /* Error-catcher for target_goto_bookmark. */
3767 static void
3768 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3769 {
3770 tcomplain ();
3771 }
3772
3773 /* Set up the handful of non-empty slots needed by the dummy target
3774 vector. */
3775
3776 static void
3777 init_dummy_target (void)
3778 {
3779 dummy_target.to_shortname = "None";
3780 dummy_target.to_longname = "None";
3781 dummy_target.to_doc = "";
3782 dummy_target.to_attach = find_default_attach;
3783 dummy_target.to_detach =
3784 (void (*)(struct target_ops *, const char *, int))target_ignore;
3785 dummy_target.to_create_inferior = find_default_create_inferior;
3786 dummy_target.to_can_async_p = find_default_can_async_p;
3787 dummy_target.to_is_async_p = find_default_is_async_p;
3788 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3789 dummy_target.to_supports_disable_randomization
3790 = find_default_supports_disable_randomization;
3791 dummy_target.to_pid_to_str = dummy_pid_to_str;
3792 dummy_target.to_stratum = dummy_stratum;
3793 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3794 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3795 dummy_target.to_get_bookmark = dummy_get_bookmark;
3796 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3797 dummy_target.to_xfer_partial = default_xfer_partial;
3798 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3799 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3800 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3801 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3802 dummy_target.to_has_execution
3803 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3804 dummy_target.to_stopped_by_watchpoint = return_zero;
3805 dummy_target.to_stopped_data_address =
3806 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3807 dummy_target.to_magic = OPS_MAGIC;
3808 }
3809 \f
3810 static void
3811 debug_to_open (char *args, int from_tty)
3812 {
3813 debug_target.to_open (args, from_tty);
3814
3815 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3816 }
3817
3818 void
3819 target_close (struct target_ops *targ)
3820 {
3821 gdb_assert (!target_is_pushed (targ));
3822
3823 if (targ->to_xclose != NULL)
3824 targ->to_xclose (targ);
3825 else if (targ->to_close != NULL)
3826 targ->to_close ();
3827
3828 if (targetdebug)
3829 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3830 }
3831
3832 void
3833 target_attach (char *args, int from_tty)
3834 {
3835 struct target_ops *t;
3836
3837 for (t = current_target.beneath; t != NULL; t = t->beneath)
3838 {
3839 if (t->to_attach != NULL)
3840 {
3841 t->to_attach (t, args, from_tty);
3842 if (targetdebug)
3843 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3844 args, from_tty);
3845 return;
3846 }
3847 }
3848
3849 internal_error (__FILE__, __LINE__,
3850 _("could not find a target to attach"));
3851 }
3852
3853 int
3854 target_thread_alive (ptid_t ptid)
3855 {
3856 struct target_ops *t;
3857
3858 for (t = current_target.beneath; t != NULL; t = t->beneath)
3859 {
3860 if (t->to_thread_alive != NULL)
3861 {
3862 int retval;
3863
3864 retval = t->to_thread_alive (t, ptid);
3865 if (targetdebug)
3866 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3867 ptid_get_pid (ptid), retval);
3868
3869 return retval;
3870 }
3871 }
3872
3873 return 0;
3874 }
3875
3876 void
3877 target_find_new_threads (void)
3878 {
3879 struct target_ops *t;
3880
3881 for (t = current_target.beneath; t != NULL; t = t->beneath)
3882 {
3883 if (t->to_find_new_threads != NULL)
3884 {
3885 t->to_find_new_threads (t);
3886 if (targetdebug)
3887 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3888
3889 return;
3890 }
3891 }
3892 }
3893
3894 void
3895 target_stop (ptid_t ptid)
3896 {
3897 if (!may_stop)
3898 {
3899 warning (_("May not interrupt or stop the target, ignoring attempt"));
3900 return;
3901 }
3902
3903 (*current_target.to_stop) (ptid);
3904 }
3905
3906 static void
3907 debug_to_post_attach (int pid)
3908 {
3909 debug_target.to_post_attach (pid);
3910
3911 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3912 }
3913
3914 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3915 result. The LIST incoming argument is released. */
3916
3917 static char *
3918 str_comma_list_concat_elem (char *list, const char *elem)
3919 {
3920 if (list == NULL)
3921 return xstrdup (elem);
3922 else
3923 return reconcat (list, list, ", ", elem, (char *) NULL);
3924 }
3925
3926 /* Helper for target_options_to_string. If OPT is present in
3927 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3928 Return the new resulting string. OPT is removed from
3929 TARGET_OPTIONS. */
3930
3931 static char *
3932 do_option (int *target_options, char *ret,
3933 int opt, char *opt_str)
3934 {
3935 if ((*target_options & opt) != 0)
3936 {
3937 ret = str_comma_list_concat_elem (ret, opt_str);
3938 *target_options &= ~opt;
3939 }
3940
3941 return ret;
3942 }
3943
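/* Return a freshly allocated, comma-separated string describing the
   bits set in TARGET_OPTIONS; the caller is responsible for freeing
   it.  For example (illustrative), TARGET_WNOHANG yields
   "TARGET_WNOHANG", any unrecognized bit is reported as "unknown???",
   and zero yields the empty string.  */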
3944 char *
3945 target_options_to_string (int target_options)
3946 {
3947 char *ret = NULL;
3948
3949 #define DO_TARG_OPTION(OPT) \
3950 ret = do_option (&target_options, ret, OPT, #OPT)
3951
3952 DO_TARG_OPTION (TARGET_WNOHANG);
3953
3954 if (target_options != 0)
3955 ret = str_comma_list_concat_elem (ret, "unknown???");
3956
3957 if (ret == NULL)
3958 ret = xstrdup ("");
3959 return ret;
3960 }
3961
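/* Debug helper: print FUNC, the name (or number) of register REGNO,
   and that register's current contents from REGCACHE to
   gdb_stdlog.  */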
3962 static void
3963 debug_print_register (const char * func,
3964 struct regcache *regcache, int regno)
3965 {
3966 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3967
3968 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3969 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3970 && gdbarch_register_name (gdbarch, regno) != NULL
3971 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3972 fprintf_unfiltered (gdb_stdlog, "(%s)",
3973 gdbarch_register_name (gdbarch, regno));
3974 else
3975 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3976 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3977 {
3978 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3979 int i, size = register_size (gdbarch, regno);
3980 gdb_byte buf[MAX_REGISTER_SIZE];
3981
3982 regcache_raw_collect (regcache, regno, buf);
3983 fprintf_unfiltered (gdb_stdlog, " = ");
3984 for (i = 0; i < size; i++)
3985 {
3986 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3987 }
3988 if (size <= sizeof (LONGEST))
3989 {
3990 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3991
3992 fprintf_unfiltered (gdb_stdlog, " %s %s",
3993 core_addr_to_string_nz (val), plongest (val));
3994 }
3995 }
3996 fprintf_unfiltered (gdb_stdlog, "\n");
3997 }
3998
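/* Fetch register REGNO into REGCACHE, using the first target beneath
   us that implements to_fetch_registers; log the call when target
   debugging is enabled.  */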
3999 void
4000 target_fetch_registers (struct regcache *regcache, int regno)
4001 {
4002 struct target_ops *t;
4003
4004 for (t = current_target.beneath; t != NULL; t = t->beneath)
4005 {
4006 if (t->to_fetch_registers != NULL)
4007 {
4008 t->to_fetch_registers (t, regcache, regno);
4009 if (targetdebug)
4010 debug_print_register ("target_fetch_registers", regcache, regno);
4011 return;
4012 }
4013 }
4014 }
4015
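/* Write register REGNO back from REGCACHE to the target, honoring
   the "may-write-registers" permission.  Calls noprocess () if no
   target implements to_store_registers.  */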
4016 void
4017 target_store_registers (struct regcache *regcache, int regno)
4018 {
4019 struct target_ops *t;
4020
4021 if (!may_write_registers)
4022 error (_("Writing to registers is not allowed (regno %d)"), regno);
4023
4024 for (t = current_target.beneath; t != NULL; t = t->beneath)
4025 {
4026 if (t->to_store_registers != NULL)
4027 {
4028 t->to_store_registers (t, regcache, regno);
4029 if (targetdebug)
4030 {
4031 debug_print_register ("target_store_registers", regcache, regno);
4032 }
4033 return;
4034 }
4035 }
4036
4037 noprocess ();
4038 }
4039
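/* Return the core that thread PTID was last seen running on, or -1
   if no target can provide that information.  */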
4040 int
4041 target_core_of_thread (ptid_t ptid)
4042 {
4043 struct target_ops *t;
4044
4045 for (t = current_target.beneath; t != NULL; t = t->beneath)
4046 {
4047 if (t->to_core_of_thread != NULL)
4048 {
4049 int retval = t->to_core_of_thread (t, ptid);
4050
4051 if (targetdebug)
4052 fprintf_unfiltered (gdb_stdlog,
4053 "target_core_of_thread (%d) = %d\n",
4054 ptid_get_pid (ptid), retval);
4055 return retval;
4056 }
4057 }
4058
4059 return -1;
4060 }
4061
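/* Ask the first target that implements to_verify_memory to compare
   SIZE bytes of the target's memory at MEMADDR against the buffer
   DATA; complain if no target supports verification.  */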
4062 int
4063 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4064 {
4065 struct target_ops *t;
4066
4067 for (t = current_target.beneath; t != NULL; t = t->beneath)
4068 {
4069 if (t->to_verify_memory != NULL)
4070 {
4071 int retval = t->to_verify_memory (t, data, memaddr, size);
4072
4073 if (targetdebug)
4074 fprintf_unfiltered (gdb_stdlog,
4075 "target_verify_memory (%s, %s) = %d\n",
4076 paddress (target_gdbarch (), memaddr),
4077 pulongest (size),
4078 retval);
4079 return retval;
4080 }
4081 }
4082
4083 tcomplain ();
4084 }
4085
4086 /* The documentation for this function is in its prototype declaration in
4087 target.h. */
4088
4089 int
4090 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4091 {
4092 struct target_ops *t;
4093
4094 for (t = current_target.beneath; t != NULL; t = t->beneath)
4095 if (t->to_insert_mask_watchpoint != NULL)
4096 {
4097 int ret;
4098
4099 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4100
4101 if (targetdebug)
4102 fprintf_unfiltered (gdb_stdlog, "\
4103 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4104 core_addr_to_string (addr),
4105 core_addr_to_string (mask), rw, ret);
4106
4107 return ret;
4108 }
4109
4110 return 1;
4111 }
4112
4113 /* The documentation for this function is in its prototype declaration in
4114 target.h. */
4115
4116 int
4117 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4118 {
4119 struct target_ops *t;
4120
4121 for (t = current_target.beneath; t != NULL; t = t->beneath)
4122 if (t->to_remove_mask_watchpoint != NULL)
4123 {
4124 int ret;
4125
4126 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4127
4128 if (targetdebug)
4129 fprintf_unfiltered (gdb_stdlog, "\
4130 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4131 core_addr_to_string (addr),
4132 core_addr_to_string (mask), rw, ret);
4133
4134 return ret;
4135 }
4136
4137 return 1;
4138 }
4139
4140 /* The documentation for this function is in its prototype declaration
4141 in target.h. */
4142
4143 int
4144 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4145 {
4146 struct target_ops *t;
4147
4148 for (t = current_target.beneath; t != NULL; t = t->beneath)
4149 if (t->to_masked_watch_num_registers != NULL)
4150 return t->to_masked_watch_num_registers (t, addr, mask);
4151
4152 return -1;
4153 }
4154
4155 /* The documentation for this function is in its prototype declaration
4156 in target.h. */
4157
4158 int
4159 target_ranged_break_num_registers (void)
4160 {
4161 struct target_ops *t;
4162
4163 for (t = current_target.beneath; t != NULL; t = t->beneath)
4164 if (t->to_ranged_break_num_registers != NULL)
4165 return t->to_ranged_break_num_registers (t);
4166
4167 return -1;
4168 }
4169
4170 /* See target.h. */
4171
4172 int
4173 target_supports_btrace (void)
4174 {
4175 struct target_ops *t;
4176
4177 for (t = current_target.beneath; t != NULL; t = t->beneath)
4178 if (t->to_supports_btrace != NULL)
4179 return t->to_supports_btrace ();
4180
4181 return 0;
4182 }
4183
4184 /* See target.h. */
4185
4186 struct btrace_target_info *
4187 target_enable_btrace (ptid_t ptid)
4188 {
4189 struct target_ops *t;
4190
4191 for (t = current_target.beneath; t != NULL; t = t->beneath)
4192 if (t->to_enable_btrace != NULL)
4193 return t->to_enable_btrace (ptid);
4194
4195 tcomplain ();
4196 return NULL;
4197 }
4198
4199 /* See target.h. */
4200
4201 void
4202 target_disable_btrace (struct btrace_target_info *btinfo)
4203 {
4204 struct target_ops *t;
4205
4206 for (t = current_target.beneath; t != NULL; t = t->beneath)
4207 if (t->to_disable_btrace != NULL)
4208 {
4209 t->to_disable_btrace (btinfo);
4210 return;
4211 }
4212
4213 tcomplain ();
4214 }
4215
4216 /* See target.h. */
4217
4218 void
4219 target_teardown_btrace (struct btrace_target_info *btinfo)
4220 {
4221 struct target_ops *t;
4222
4223 for (t = current_target.beneath; t != NULL; t = t->beneath)
4224 if (t->to_teardown_btrace != NULL)
4225 {
4226 t->to_teardown_btrace (btinfo);
4227 return;
4228 }
4229
4230 tcomplain ();
4231 }
4232
4233 /* See target.h. */
4234
4235 enum btrace_error
4236 target_read_btrace (VEC (btrace_block_s) **btrace,
4237 struct btrace_target_info *btinfo,
4238 enum btrace_read_type type)
4239 {
4240 struct target_ops *t;
4241
4242 for (t = current_target.beneath; t != NULL; t = t->beneath)
4243 if (t->to_read_btrace != NULL)
4244 return t->to_read_btrace (btrace, btinfo, type);
4245
4246 tcomplain ();
4247 return BTRACE_ERR_NOT_SUPPORTED;
4248 }
4249
4250 /* See target.h. */
4251
4252 void
4253 target_stop_recording (void)
4254 {
4255 struct target_ops *t;
4256
4257 for (t = current_target.beneath; t != NULL; t = t->beneath)
4258 if (t->to_stop_recording != NULL)
4259 {
4260 t->to_stop_recording ();
4261 return;
4262 }
4263
4264 /* This is optional. */
4265 }
4266
4267 /* See target.h. */
4268
4269 void
4270 target_info_record (void)
4271 {
4272 struct target_ops *t;
4273
4274 for (t = current_target.beneath; t != NULL; t = t->beneath)
4275 if (t->to_info_record != NULL)
4276 {
4277 t->to_info_record ();
4278 return;
4279 }
4280
4281 tcomplain ();
4282 }
4283
4284 /* See target.h. */
4285
4286 void
4287 target_save_record (const char *filename)
4288 {
4289 struct target_ops *t;
4290
4291 for (t = current_target.beneath; t != NULL; t = t->beneath)
4292 if (t->to_save_record != NULL)
4293 {
4294 t->to_save_record (filename);
4295 return;
4296 }
4297
4298 tcomplain ();
4299 }
4300
4301 /* See target.h. */
4302
4303 int
4304 target_supports_delete_record (void)
4305 {
4306 struct target_ops *t;
4307
4308 for (t = current_target.beneath; t != NULL; t = t->beneath)
4309 if (t->to_delete_record != NULL)
4310 return 1;
4311
4312 return 0;
4313 }
4314
4315 /* See target.h. */
4316
4317 void
4318 target_delete_record (void)
4319 {
4320 struct target_ops *t;
4321
4322 for (t = current_target.beneath; t != NULL; t = t->beneath)
4323 if (t->to_delete_record != NULL)
4324 {
4325 t->to_delete_record ();
4326 return;
4327 }
4328
4329 tcomplain ();
4330 }
4331
4332 /* See target.h. */
4333
4334 int
4335 target_record_is_replaying (void)
4336 {
4337 struct target_ops *t;
4338
4339 for (t = current_target.beneath; t != NULL; t = t->beneath)
4340 if (t->to_record_is_replaying != NULL)
4341 return t->to_record_is_replaying ();
4342
4343 return 0;
4344 }
4345
4346 /* See target.h. */
4347
4348 void
4349 target_goto_record_begin (void)
4350 {
4351 struct target_ops *t;
4352
4353 for (t = current_target.beneath; t != NULL; t = t->beneath)
4354 if (t->to_goto_record_begin != NULL)
4355 {
4356 t->to_goto_record_begin ();
4357 return;
4358 }
4359
4360 tcomplain ();
4361 }
4362
4363 /* See target.h. */
4364
4365 void
4366 target_goto_record_end (void)
4367 {
4368 struct target_ops *t;
4369
4370 for (t = current_target.beneath; t != NULL; t = t->beneath)
4371 if (t->to_goto_record_end != NULL)
4372 {
4373 t->to_goto_record_end ();
4374 return;
4375 }
4376
4377 tcomplain ();
4378 }
4379
4380 /* See target.h. */
4381
4382 void
4383 target_goto_record (ULONGEST insn)
4384 {
4385 struct target_ops *t;
4386
4387 for (t = current_target.beneath; t != NULL; t = t->beneath)
4388 if (t->to_goto_record != NULL)
4389 {
4390 t->to_goto_record (insn);
4391 return;
4392 }
4393
4394 tcomplain ();
4395 }
4396
4397 /* See target.h. */
4398
4399 void
4400 target_insn_history (int size, int flags)
4401 {
4402 struct target_ops *t;
4403
4404 for (t = current_target.beneath; t != NULL; t = t->beneath)
4405 if (t->to_insn_history != NULL)
4406 {
4407 t->to_insn_history (size, flags);
4408 return;
4409 }
4410
4411 tcomplain ();
4412 }
4413
4414 /* See target.h. */
4415
4416 void
4417 target_insn_history_from (ULONGEST from, int size, int flags)
4418 {
4419 struct target_ops *t;
4420
4421 for (t = current_target.beneath; t != NULL; t = t->beneath)
4422 if (t->to_insn_history_from != NULL)
4423 {
4424 t->to_insn_history_from (from, size, flags);
4425 return;
4426 }
4427
4428 tcomplain ();
4429 }
4430
4431 /* See target.h. */
4432
4433 void
4434 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4435 {
4436 struct target_ops *t;
4437
4438 for (t = current_target.beneath; t != NULL; t = t->beneath)
4439 if (t->to_insn_history_range != NULL)
4440 {
4441 t->to_insn_history_range (begin, end, flags);
4442 return;
4443 }
4444
4445 tcomplain ();
4446 }
4447
4448 /* See target.h. */
4449
4450 void
4451 target_call_history (int size, int flags)
4452 {
4453 struct target_ops *t;
4454
4455 for (t = current_target.beneath; t != NULL; t = t->beneath)
4456 if (t->to_call_history != NULL)
4457 {
4458 t->to_call_history (size, flags);
4459 return;
4460 }
4461
4462 tcomplain ();
4463 }
4464
4465 /* See target.h. */
4466
4467 void
4468 target_call_history_from (ULONGEST begin, int size, int flags)
4469 {
4470 struct target_ops *t;
4471
4472 for (t = current_target.beneath; t != NULL; t = t->beneath)
4473 if (t->to_call_history_from != NULL)
4474 {
4475 t->to_call_history_from (begin, size, flags);
4476 return;
4477 }
4478
4479 tcomplain ();
4480 }
4481
4482 /* See target.h. */
4483
4484 void
4485 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4486 {
4487 struct target_ops *t;
4488
4489 for (t = current_target.beneath; t != NULL; t = t->beneath)
4490 if (t->to_call_history_range != NULL)
4491 {
4492 t->to_call_history_range (begin, end, flags);
4493 return;
4494 }
4495
4496 tcomplain ();
4497 }
4498
4499 static void
4500 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4501 {
4502 debug_target.to_prepare_to_store (&debug_target, regcache);
4503
4504 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4505 }
4506
4507 /* See target.h. */
4508
4509 const struct frame_unwind *
4510 target_get_unwinder (void)
4511 {
4512 struct target_ops *t;
4513
4514 for (t = current_target.beneath; t != NULL; t = t->beneath)
4515 if (t->to_get_unwinder != NULL)
4516 return t->to_get_unwinder;
4517
4518 return NULL;
4519 }
4520
4521 /* See target.h. */
4522
4523 const struct frame_unwind *
4524 target_get_tailcall_unwinder (void)
4525 {
4526 struct target_ops *t;
4527
4528 for (t = current_target.beneath; t != NULL; t = t->beneath)
4529 if (t->to_get_tailcall_unwinder != NULL)
4530 return t->to_get_tailcall_unwinder;
4531
4532 return NULL;
4533 }
4534
4535 /* See target.h. */
4536
4537 CORE_ADDR
4538 forward_target_decr_pc_after_break (struct target_ops *ops,
4539 struct gdbarch *gdbarch)
4540 {
4541 for (; ops != NULL; ops = ops->beneath)
4542 if (ops->to_decr_pc_after_break != NULL)
4543 return ops->to_decr_pc_after_break (ops, gdbarch);
4544
4545 return gdbarch_decr_pc_after_break (gdbarch);
4546 }
4547
4548 /* See target.h. */
4549
4550 CORE_ADDR
4551 target_decr_pc_after_break (struct gdbarch *gdbarch)
4552 {
4553 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4554 }
4555
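/* Debug wrapper for the deprecated memory transfer method: forward
   the request to the real target, then log the request, the result,
   and the bytes transferred (truncated unless "set debug target" is
   2 or higher).  */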
4556 static int
4557 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4558 int write, struct mem_attrib *attrib,
4559 struct target_ops *target)
4560 {
4561 int retval;
4562
4563 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4564 attrib, target);
4565
4566 fprintf_unfiltered (gdb_stdlog,
4567 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4568 paddress (target_gdbarch (), memaddr), len,
4569 write ? "write" : "read", retval);
4570
4571 if (retval > 0)
4572 {
4573 int i;
4574
4575 fputs_unfiltered (", bytes =", gdb_stdlog);
4576 for (i = 0; i < retval; i++)
4577 {
4578 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4579 {
4580 if (targetdebug < 2 && i > 0)
4581 {
4582 fprintf_unfiltered (gdb_stdlog, " ...");
4583 break;
4584 }
4585 fprintf_unfiltered (gdb_stdlog, "\n");
4586 }
4587
4588 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4589 }
4590 }
4591
4592 fputc_unfiltered ('\n', gdb_stdlog);
4593
4594 return retval;
4595 }
4596
4597 static void
4598 debug_to_files_info (struct target_ops *target)
4599 {
4600 debug_target.to_files_info (target);
4601
4602 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4603 }
4604
4605 static int
4606 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4607 struct bp_target_info *bp_tgt)
4608 {
4609 int retval;
4610
4611 retval = forward_target_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4612
4613 fprintf_unfiltered (gdb_stdlog,
4614 "target_insert_breakpoint (%s, xxx) = %ld\n",
4615 core_addr_to_string (bp_tgt->placed_address),
4616 (unsigned long) retval);
4617 return retval;
4618 }
4619
4620 static int
4621 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4622 struct bp_target_info *bp_tgt)
4623 {
4624 int retval;
4625
4626 retval = forward_target_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4627
4628 fprintf_unfiltered (gdb_stdlog,
4629 "target_remove_breakpoint (%s, xxx) = %ld\n",
4630 core_addr_to_string (bp_tgt->placed_address),
4631 (unsigned long) retval);
4632 return retval;
4633 }
4634
4635 static int
4636 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4637 {
4638 int retval;
4639
4640 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4641
4642 fprintf_unfiltered (gdb_stdlog,
4643 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4644 (unsigned long) type,
4645 (unsigned long) cnt,
4646 (unsigned long) from_tty,
4647 (unsigned long) retval);
4648 return retval;
4649 }
4650
4651 static int
4652 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4653 {
4654 CORE_ADDR retval;
4655
4656 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4657
4658 fprintf_unfiltered (gdb_stdlog,
4659 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4660 core_addr_to_string (addr), (unsigned long) len,
4661 core_addr_to_string (retval));
4662 return retval;
4663 }
4664
4665 static int
4666 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4667 struct expression *cond)
4668 {
4669 int retval;
4670
4671 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4672 rw, cond);
4673
4674 fprintf_unfiltered (gdb_stdlog,
4675 "target_can_accel_watchpoint_condition "
4676 "(%s, %d, %d, %s) = %ld\n",
4677 core_addr_to_string (addr), len, rw,
4678 host_address_to_string (cond), (unsigned long) retval);
4679 return retval;
4680 }
4681
4682 static int
4683 debug_to_stopped_by_watchpoint (void)
4684 {
4685 int retval;
4686
4687 retval = debug_target.to_stopped_by_watchpoint ();
4688
4689 fprintf_unfiltered (gdb_stdlog,
4690 "target_stopped_by_watchpoint () = %ld\n",
4691 (unsigned long) retval);
4692 return retval;
4693 }
4694
4695 static int
4696 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4697 {
4698 int retval;
4699
4700 retval = debug_target.to_stopped_data_address (target, addr);
4701
4702 fprintf_unfiltered (gdb_stdlog,
4703 "target_stopped_data_address ([%s]) = %ld\n",
4704 core_addr_to_string (*addr),
4705 (unsigned long)retval);
4706 return retval;
4707 }
4708
4709 static int
4710 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4711 CORE_ADDR addr,
4712 CORE_ADDR start, int length)
4713 {
4714 int retval;
4715
4716 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4717 start, length);
4718
4719 fprintf_unfiltered (gdb_stdlog,
4720 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4721 core_addr_to_string (addr), core_addr_to_string (start),
4722 length, retval);
4723 return retval;
4724 }
4725
4726 static int
4727 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4728 struct bp_target_info *bp_tgt)
4729 {
4730 int retval;
4731
4732 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4733
4734 fprintf_unfiltered (gdb_stdlog,
4735 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4736 core_addr_to_string (bp_tgt->placed_address),
4737 (unsigned long) retval);
4738 return retval;
4739 }
4740
4741 static int
4742 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4743 struct bp_target_info *bp_tgt)
4744 {
4745 int retval;
4746
4747 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4748
4749 fprintf_unfiltered (gdb_stdlog,
4750 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4751 core_addr_to_string (bp_tgt->placed_address),
4752 (unsigned long) retval);
4753 return retval;
4754 }
4755
4756 static int
4757 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4758 struct expression *cond)
4759 {
4760 int retval;
4761
4762 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4763
4764 fprintf_unfiltered (gdb_stdlog,
4765 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4766 core_addr_to_string (addr), len, type,
4767 host_address_to_string (cond), (unsigned long) retval);
4768 return retval;
4769 }
4770
4771 static int
4772 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4773 struct expression *cond)
4774 {
4775 int retval;
4776
4777 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4778
4779 fprintf_unfiltered (gdb_stdlog,
4780 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4781 core_addr_to_string (addr), len, type,
4782 host_address_to_string (cond), (unsigned long) retval);
4783 return retval;
4784 }
4785
4786 static void
4787 debug_to_terminal_init (void)
4788 {
4789 debug_target.to_terminal_init ();
4790
4791 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4792 }
4793
4794 static void
4795 debug_to_terminal_inferior (void)
4796 {
4797 debug_target.to_terminal_inferior ();
4798
4799 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4800 }
4801
4802 static void
4803 debug_to_terminal_ours_for_output (void)
4804 {
4805 debug_target.to_terminal_ours_for_output ();
4806
4807 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4808 }
4809
4810 static void
4811 debug_to_terminal_ours (void)
4812 {
4813 debug_target.to_terminal_ours ();
4814
4815 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4816 }
4817
4818 static void
4819 debug_to_terminal_save_ours (void)
4820 {
4821 debug_target.to_terminal_save_ours ();
4822
4823 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4824 }
4825
4826 static void
4827 debug_to_terminal_info (const char *arg, int from_tty)
4828 {
4829 debug_target.to_terminal_info (arg, from_tty);
4830
4831 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4832 from_tty);
4833 }
4834
4835 static void
4836 debug_to_load (char *args, int from_tty)
4837 {
4838 debug_target.to_load (args, from_tty);
4839
4840 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4841 }
4842
4843 static void
4844 debug_to_post_startup_inferior (ptid_t ptid)
4845 {
4846 debug_target.to_post_startup_inferior (ptid);
4847
4848 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4849 ptid_get_pid (ptid));
4850 }
4851
4852 static int
4853 debug_to_insert_fork_catchpoint (int pid)
4854 {
4855 int retval;
4856
4857 retval = debug_target.to_insert_fork_catchpoint (pid);
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4860 pid, retval);
4861
4862 return retval;
4863 }
4864
4865 static int
4866 debug_to_remove_fork_catchpoint (int pid)
4867 {
4868 int retval;
4869
4870 retval = debug_target.to_remove_fork_catchpoint (pid);
4871
4872 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4873 pid, retval);
4874
4875 return retval;
4876 }
4877
4878 static int
4879 debug_to_insert_vfork_catchpoint (int pid)
4880 {
4881 int retval;
4882
4883 retval = debug_target.to_insert_vfork_catchpoint (pid);
4884
4885 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4886 pid, retval);
4887
4888 return retval;
4889 }
4890
4891 static int
4892 debug_to_remove_vfork_catchpoint (int pid)
4893 {
4894 int retval;
4895
4896 retval = debug_target.to_remove_vfork_catchpoint (pid);
4897
4898 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4899 pid, retval);
4900
4901 return retval;
4902 }
4903
4904 static int
4905 debug_to_insert_exec_catchpoint (int pid)
4906 {
4907 int retval;
4908
4909 retval = debug_target.to_insert_exec_catchpoint (pid);
4910
4911 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4912 pid, retval);
4913
4914 return retval;
4915 }
4916
4917 static int
4918 debug_to_remove_exec_catchpoint (int pid)
4919 {
4920 int retval;
4921
4922 retval = debug_target.to_remove_exec_catchpoint (pid);
4923
4924 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4925 pid, retval);
4926
4927 return retval;
4928 }
4929
4930 static int
4931 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4932 {
4933 int has_exited;
4934
4935 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4936
4937 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4938 pid, wait_status, *exit_status, has_exited);
4939
4940 return has_exited;
4941 }
4942
4943 static int
4944 debug_to_can_run (void)
4945 {
4946 int retval;
4947
4948 retval = debug_target.to_can_run ();
4949
4950 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4951
4952 return retval;
4953 }
4954
4955 static struct gdbarch *
4956 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4957 {
4958 struct gdbarch *retval;
4959
4960 retval = debug_target.to_thread_architecture (ops, ptid);
4961
4962 fprintf_unfiltered (gdb_stdlog,
4963 "target_thread_architecture (%s) = %s [%s]\n",
4964 target_pid_to_str (ptid),
4965 host_address_to_string (retval),
4966 gdbarch_bfd_arch_info (retval)->printable_name);
4967 return retval;
4968 }
4969
4970 static void
4971 debug_to_stop (ptid_t ptid)
4972 {
4973 debug_target.to_stop (ptid);
4974
4975 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4976 target_pid_to_str (ptid));
4977 }
4978
4979 static void
4980 debug_to_rcmd (char *command,
4981 struct ui_file *outbuf)
4982 {
4983 debug_target.to_rcmd (command, outbuf);
4984 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4985 }
4986
4987 static char *
4988 debug_to_pid_to_exec_file (int pid)
4989 {
4990 char *exec_file;
4991
4992 exec_file = debug_target.to_pid_to_exec_file (pid);
4993
4994 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4995 pid, exec_file);
4996
4997 return exec_file;
4998 }
4999
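/* Install the target debugging wrappers: save a copy of the current
   target vector in debug_target, then overwrite selected methods of
   current_target with the debug_to_* functions above, which log each
   call to gdb_stdlog and delegate to the saved copy.  */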
5000 static void
5001 setup_target_debug (void)
5002 {
5003 memcpy (&debug_target, &current_target, sizeof debug_target);
5004
5005 current_target.to_open = debug_to_open;
5006 current_target.to_post_attach = debug_to_post_attach;
5007 current_target.to_prepare_to_store = debug_to_prepare_to_store;
5008 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
5009 current_target.to_files_info = debug_to_files_info;
5010 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
5011 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
5012 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
5013 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
5014 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
5015 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
5016 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
5017 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
5018 current_target.to_stopped_data_address = debug_to_stopped_data_address;
5019 current_target.to_watchpoint_addr_within_range
5020 = debug_to_watchpoint_addr_within_range;
5021 current_target.to_region_ok_for_hw_watchpoint
5022 = debug_to_region_ok_for_hw_watchpoint;
5023 current_target.to_can_accel_watchpoint_condition
5024 = debug_to_can_accel_watchpoint_condition;
5025 current_target.to_terminal_init = debug_to_terminal_init;
5026 current_target.to_terminal_inferior = debug_to_terminal_inferior;
5027 current_target.to_terminal_ours_for_output
5028 = debug_to_terminal_ours_for_output;
5029 current_target.to_terminal_ours = debug_to_terminal_ours;
5030 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
5031 current_target.to_terminal_info = debug_to_terminal_info;
5032 current_target.to_load = debug_to_load;
5033 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
5034 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
5035 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
5036 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
5037 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
5038 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5039 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5040 current_target.to_has_exited = debug_to_has_exited;
5041 current_target.to_can_run = debug_to_can_run;
5042 current_target.to_stop = debug_to_stop;
5043 current_target.to_rcmd = debug_to_rcmd;
5044 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5045 current_target.to_thread_architecture = debug_to_thread_architecture;
5046 }
5047 \f
5048
5049 static char targ_desc[] =
5050 "Names of targets and files being debugged.\nShows the entire \
5051 stack of targets currently in use (including the exec-file,\n\
5052 core-file, and process, if any), as well as the symbol file name.";
5053
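/* Implement the "monitor" command: forward CMD to the target's
   to_rcmd method, erroring out first if that method (or the one
   wrapped by the debug target) is merely the tcomplain placeholder.  */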
5054 static void
5055 do_monitor_command (char *cmd,
5056 int from_tty)
5057 {
5058 if ((current_target.to_rcmd
5059 == (void (*) (char *, struct ui_file *)) tcomplain)
5060 || (current_target.to_rcmd == debug_to_rcmd
5061 && (debug_target.to_rcmd
5062 == (void (*) (char *, struct ui_file *)) tcomplain)))
5063 error (_("\"monitor\" command not supported by this target."));
5064 target_rcmd (cmd, gdb_stdtarg);
5065 }
5066
5067 /* Print the name of each layer of our target stack. */
5068
5069 static void
5070 maintenance_print_target_stack (char *cmd, int from_tty)
5071 {
5072 struct target_ops *t;
5073
5074 printf_filtered (_("The current target stack is:\n"));
5075
5076 for (t = target_stack; t != NULL; t = t->beneath)
5077 {
5078 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5079 }
5080 }
5081
5082 /* Controls whether async mode is permitted. */
5083 int target_async_permitted = 0;
5084
5085 /* The set command writes to this variable. If the inferior is
5086 executing, target_async_permitted is *not* updated. */
5087 static int target_async_permitted_1 = 0;
5088
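/* Callback for "set target-async".  The setting cannot be changed
   while there are live inferiors; otherwise copy the user-visible
   value into target_async_permitted.  */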
5089 static void
5090 set_target_async_command (char *args, int from_tty,
5091 struct cmd_list_element *c)
5092 {
5093 if (have_live_inferiors ())
5094 {
5095 target_async_permitted_1 = target_async_permitted;
5096 error (_("Cannot change this setting while the inferior is running."));
5097 }
5098
5099 target_async_permitted = target_async_permitted_1;
5100 }
5101
5102 static void
5103 show_target_async_command (struct ui_file *file, int from_tty,
5104 struct cmd_list_element *c,
5105 const char *value)
5106 {
5107 fprintf_filtered (file,
5108 _("Controlling the inferior in "
5109 "asynchronous mode is %s.\n"), value);
5110 }
5111
5112 /* Temporary copies of permission settings. */
5113
5114 static int may_write_registers_1 = 1;
5115 static int may_write_memory_1 = 1;
5116 static int may_insert_breakpoints_1 = 1;
5117 static int may_insert_tracepoints_1 = 1;
5118 static int may_insert_fast_tracepoints_1 = 1;
5119 static int may_stop_1 = 1;
5120
5121 /* Make the user-set values match the real values again. */
5122
5123 void
5124 update_target_permissions (void)
5125 {
5126 may_write_registers_1 = may_write_registers;
5127 may_write_memory_1 = may_write_memory;
5128 may_insert_breakpoints_1 = may_insert_breakpoints;
5129 may_insert_tracepoints_1 = may_insert_tracepoints;
5130 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5131 may_stop_1 = may_stop;
5132 }
5133
5134 /* This single function handles (most of) the permission flags in the
5135 same way. */
5136
5137 static void
5138 set_target_permissions (char *args, int from_tty,
5139 struct cmd_list_element *c)
5140 {
5141 if (target_has_execution)
5142 {
5143 update_target_permissions ();
5144 error (_("Cannot change this setting while the inferior is running."));
5145 }
5146
5147 /* Make the real values match the user-changed values. */
5148 may_write_registers = may_write_registers_1;
5149 may_insert_breakpoints = may_insert_breakpoints_1;
5150 may_insert_tracepoints = may_insert_tracepoints_1;
5151 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5152 may_stop = may_stop_1;
5153 update_observer_mode ();
5154 }
5155
5156 /* Set memory write permission independently of observer mode. */
5157
5158 static void
5159 set_write_memory_permission (char *args, int from_tty,
5160 struct cmd_list_element *c)
5161 {
5162 /* Make the real values match the user-changed values. */
5163 may_write_memory = may_write_memory_1;
5164 update_observer_mode ();
5165 }
5166
5167
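/* Set up the initial (dummy) target and register the commands and
   "set"/"show" knobs provided by this file.  */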
5168 void
5169 initialize_targets (void)
5170 {
5171 init_dummy_target ();
5172 push_target (&dummy_target);
5173
5174 add_info ("target", target_info, targ_desc);
5175 add_info ("files", target_info, targ_desc);
5176
5177 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5178 Set target debugging."), _("\
5179 Show target debugging."), _("\
5180 When non-zero, target debugging is enabled. Higher numbers are more\n\
5181 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5182 command."),
5183 NULL,
5184 show_targetdebug,
5185 &setdebuglist, &showdebuglist);
5186
5187 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5188 &trust_readonly, _("\
5189 Set mode for reading from readonly sections."), _("\
5190 Show mode for reading from readonly sections."), _("\
5191 When this mode is on, memory reads from readonly sections (such as .text)\n\
5192 will be read from the object file instead of from the target. This will\n\
5193 result in significant performance improvement for remote targets."),
5194 NULL,
5195 show_trust_readonly,
5196 &setlist, &showlist);
5197
5198 add_com ("monitor", class_obscure, do_monitor_command,
5199 _("Send a command to the remote monitor (remote targets only)."));
5200
5201 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5202 _("Print the name of each layer of the internal target stack."),
5203 &maintenanceprintlist);
5204
5205 add_setshow_boolean_cmd ("target-async", no_class,
5206 &target_async_permitted_1, _("\
5207 Set whether gdb controls the inferior in asynchronous mode."), _("\
5208 Show whether gdb controls the inferior in asynchronous mode."), _("\
5209 Tells gdb whether to control the inferior in asynchronous mode."),
5210 set_target_async_command,
5211 show_target_async_command,
5212 &setlist,
5213 &showlist);
5214
5215 add_setshow_boolean_cmd ("may-write-registers", class_support,
5216 &may_write_registers_1, _("\
5217 Set permission to write into registers."), _("\
5218 Show permission to write into registers."), _("\
5219 When this permission is on, GDB may write into the target's registers.\n\
5220 Otherwise, any sort of write attempt will result in an error."),
5221 set_target_permissions, NULL,
5222 &setlist, &showlist);
5223
5224 add_setshow_boolean_cmd ("may-write-memory", class_support,
5225 &may_write_memory_1, _("\
5226 Set permission to write into target memory."), _("\
5227 Show permission to write into target memory."), _("\
5228 When this permission is on, GDB may write into the target's memory.\n\
5229 Otherwise, any sort of write attempt will result in an error."),
5230 set_write_memory_permission, NULL,
5231 &setlist, &showlist);
5232
5233 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5234 &may_insert_breakpoints_1, _("\
5235 Set permission to insert breakpoints in the target."), _("\
5236 Show permission to insert breakpoints in the target."), _("\
5237 When this permission is on, GDB may insert breakpoints in the program.\n\
5238 Otherwise, any sort of insertion attempt will result in an error."),
5239 set_target_permissions, NULL,
5240 &setlist, &showlist);
5241
5242 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5243 &may_insert_tracepoints_1, _("\
5244 Set permission to insert tracepoints in the target."), _("\
5245 Show permission to insert tracepoints in the target."), _("\
5246 When this permission is on, GDB may insert tracepoints in the program.\n\
5247 Otherwise, any sort of insertion attempt will result in an error."),
5248 set_target_permissions, NULL,
5249 &setlist, &showlist);
5250
5251 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5252 &may_insert_fast_tracepoints_1, _("\
5253 Set permission to insert fast tracepoints in the target."), _("\
5254 Show permission to insert fast tracepoints in the target."), _("\
5255 When this permission is on, GDB may insert fast tracepoints.\n\
5256 Otherwise, any sort of insertion attempt will result in an error."),
5257 set_target_permissions, NULL,
5258 &setlist, &showlist);
5259
5260 add_setshow_boolean_cmd ("may-interrupt", class_support,
5261 &may_stop_1, _("\
5262 Set permission to interrupt or signal the target."), _("\
5263 Show permission to interrupt or signal the target."), _("\
5264 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5265 Otherwise, any attempt to interrupt or stop will be ignored."),
5266 set_target_permissions, NULL,
5267 &setlist, &showlist);
5268 }