fix up cleanup handling in internal_vproblem
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (const char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_load (char *, int);
146
147 static int debug_to_can_run (void);
148
149 static void debug_to_stop (ptid_t);
150
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_index;
157 unsigned target_struct_allocsize;
158 #define DEFAULT_ALLOCSIZE 10
159
160 /* The initial current target, so that there is always a semi-valid
161 current target. */
162
163 static struct target_ops dummy_target;
164
165 /* Top of target stack. */
166
167 static struct target_ops *target_stack;
168
169 /* The target structure we are currently using to talk to a process
170 or file or whatever "inferior" we have. */
171
172 struct target_ops current_target;
173
174 /* Command list for target. */
175
176 static struct cmd_list_element *targetlist = NULL;
177
178 /* Nonzero if we should trust readonly sections from the
179 executable when reading memory. */
180
181 static int trust_readonly = 0;
182
183 /* Nonzero if we should show true memory content including
184 memory breakpoints inserted by gdb. */
185
186 static int show_memory_breakpoints = 0;
187
188 /* These globals control whether GDB attempts to perform these
189 operations; they are useful for targets that need to prevent
190 inadvertent disruption, such as in non-stop mode. */
191
192 int may_write_registers = 1;
193
194 int may_write_memory = 1;
195
196 int may_insert_breakpoints = 1;
197
198 int may_insert_tracepoints = 1;
199
200 int may_insert_fast_tracepoints = 1;
201
202 int may_stop = 1;
203
204 /* Non-zero if we want to see trace of target level stuff. */
205
206 static unsigned int targetdebug = 0;
207 static void
208 show_targetdebug (struct ui_file *file, int from_tty,
209 struct cmd_list_element *c, const char *value)
210 {
211 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
212 }
213
214 static void setup_target_debug (void);
215
216 /* The option sets this. */
217 static int stack_cache_enabled_p_1 = 1;
218 /* And set_stack_cache_enabled_p updates this.
219 The reason for the separation is so that we don't flush the cache for
220 on->on transitions. */
221 static int stack_cache_enabled_p = 1;
222
223 /* This is called *after* the stack-cache has been set.
224 Flush the cache for off->on and on->off transitions.
225 There's no real need to flush the cache for on->off transitions,
226 except cleanliness. */
227
228 static void
229 set_stack_cache_enabled_p (char *args, int from_tty,
230 struct cmd_list_element *c)
231 {
232 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
233 target_dcache_invalidate ();
234
235 stack_cache_enabled_p = stack_cache_enabled_p_1;
236 }
237
238 static void
239 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
240 struct cmd_list_element *c, const char *value)
241 {
242 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
243 }
244
245 /* Cache of memory operations, to speed up remote access. */
246 static DCACHE *target_dcache;
247
248 /* Invalidate the target dcache. */
249
250 void
251 target_dcache_invalidate (void)
252 {
253 dcache_invalidate (target_dcache);
254 }
255
256 /* The user just typed 'target' without the name of a target. */
257
258 static void
259 target_command (char *arg, int from_tty)
260 {
261 fputs_filtered ("Argument required (target name). Try `help target'\n",
262 gdb_stdout);
263 }
264
265 /* Default target_has_* methods for process_stratum targets. */
266
267 int
268 default_child_has_all_memory (struct target_ops *ops)
269 {
270 /* If no inferior selected, then we can't read memory here. */
271 if (ptid_equal (inferior_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277 int
278 default_child_has_memory (struct target_ops *ops)
279 {
280 /* If no inferior selected, then we can't read memory here. */
281 if (ptid_equal (inferior_ptid, null_ptid))
282 return 0;
283
284 return 1;
285 }
286
287 int
288 default_child_has_stack (struct target_ops *ops)
289 {
290 /* If no inferior selected, there's no stack. */
291 if (ptid_equal (inferior_ptid, null_ptid))
292 return 0;
293
294 return 1;
295 }
296
297 int
298 default_child_has_registers (struct target_ops *ops)
299 {
300 /* Can't read registers from no inferior. */
301 if (ptid_equal (inferior_ptid, null_ptid))
302 return 0;
303
304 return 1;
305 }
306
307 int
308 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
309 {
310 /* If there's no thread selected, then we can't make it run through
311 hoops. */
312 if (ptid_equal (the_ptid, null_ptid))
313 return 0;
314
315 return 1;
316 }
317
318
319 int
320 target_has_all_memory_1 (void)
321 {
322 struct target_ops *t;
323
324 for (t = current_target.beneath; t != NULL; t = t->beneath)
325 if (t->to_has_all_memory (t))
326 return 1;
327
328 return 0;
329 }
330
331 int
332 target_has_memory_1 (void)
333 {
334 struct target_ops *t;
335
336 for (t = current_target.beneath; t != NULL; t = t->beneath)
337 if (t->to_has_memory (t))
338 return 1;
339
340 return 0;
341 }
342
343 int
344 target_has_stack_1 (void)
345 {
346 struct target_ops *t;
347
348 for (t = current_target.beneath; t != NULL; t = t->beneath)
349 if (t->to_has_stack (t))
350 return 1;
351
352 return 0;
353 }
354
355 int
356 target_has_registers_1 (void)
357 {
358 struct target_ops *t;
359
360 for (t = current_target.beneath; t != NULL; t = t->beneath)
361 if (t->to_has_registers (t))
362 return 1;
363
364 return 0;
365 }
366
367 int
368 target_has_execution_1 (ptid_t the_ptid)
369 {
370 struct target_ops *t;
371
372 for (t = current_target.beneath; t != NULL; t = t->beneath)
373 if (t->to_has_execution (t, the_ptid))
374 return 1;
375
376 return 0;
377 }
378
379 int
380 target_has_execution_current (void)
381 {
382 return target_has_execution_1 (inferior_ptid);
383 }
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
389 void
390 add_target_with_completer (struct target_ops *t,
391 completer_ftype *completer)
392 {
393 struct cmd_list_element *c;
394
395 /* Provide default values for all "must have" methods. */
396 if (t->to_xfer_partial == NULL)
397 t->to_xfer_partial = default_xfer_partial;
398
399 if (t->to_has_all_memory == NULL)
400 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_memory == NULL)
403 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_stack == NULL)
406 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_registers == NULL)
409 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
410
411 if (t->to_has_execution == NULL)
412 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
413
414 if (!target_structs)
415 {
416 target_struct_allocsize = DEFAULT_ALLOCSIZE;
417 target_structs = (struct target_ops **) xmalloc
418 (target_struct_allocsize * sizeof (*target_structs));
419 }
420 if (target_struct_size >= target_struct_allocsize)
421 {
422 target_struct_allocsize *= 2;
423 target_structs = (struct target_ops **)
424 xrealloc ((char *) target_structs,
425 target_struct_allocsize * sizeof (*target_structs));
426 }
427 target_structs[target_struct_size++] = t;
428
429 if (targetlist == NULL)
430 add_prefix_cmd ("target", class_run, target_command, _("\
431 Connect to a target machine or process.\n\
432 The first argument is the type or protocol of the target machine.\n\
433 Remaining arguments are interpreted by the target protocol. For more\n\
434 information on the arguments for a particular protocol, type\n\
435 `help target ' followed by the protocol name."),
436 &targetlist, "target ", 0, &cmdlist);
437 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
438 &targetlist);
439 if (completer != NULL)
440 set_cmd_completer (c, completer);
441 }
442
443 /* Add a possible target architecture to the list. */
444
445 void
446 add_target (struct target_ops *t)
447 {
448 add_target_with_completer (t, NULL);
449 }
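/* Illustrative sketch, not part of the original target.c: the usual way a
   backend registers itself with add_target.  The "frobnitz" names are
   hypothetical; the fields shown are the ones this file requires (unset
   methods receive defaults from add_target and update_current_target).  */
#if 0
static struct target_ops frobnitz_ops;

static void
frobnitz_open (char *args, int from_tty)
{
  /* Connect to the device or process here, then install the vector on
     the target stack.  */
  push_target (&frobnitz_ops);
}

static void
init_frobnitz_ops (void)
{
  frobnitz_ops.to_shortname = "frobnitz";
  frobnitz_ops.to_longname = "Hypothetical frobnitz target";
  frobnitz_ops.to_doc = "Connect to a frobnitz device.";
  frobnitz_ops.to_open = frobnitz_open;
  frobnitz_ops.to_stratum = process_stratum;
  frobnitz_ops.to_magic = OPS_MAGIC;
}

void
_initialize_frobnitz (void)
{
  init_frobnitz_ops ();
  add_target (&frobnitz_ops);	/* Creates the "target frobnitz" command.  */
}
#endif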
450
451 /* See target.h. */
452
453 void
454 add_deprecated_target_alias (struct target_ops *t, char *alias)
455 {
456 struct cmd_list_element *c;
457 char *alt;
458
459 /* If we use add_alias_cmd here, we do not get the deprecated warning;
460 see PR cli/15104. */
461 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
462 alt = xstrprintf ("target %s", t->to_shortname);
463 deprecate_cmd (c, alt);
464 }
465
466 /* Stub functions */
467
468 void
469 target_ignore (void)
470 {
471 }
472
473 void
474 target_kill (void)
475 {
476 struct target_ops *t;
477
478 for (t = current_target.beneath; t != NULL; t = t->beneath)
479 if (t->to_kill != NULL)
480 {
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
483
484 t->to_kill (t);
485 return;
486 }
487
488 noprocess ();
489 }
490
491 void
492 target_load (char *arg, int from_tty)
493 {
494 target_dcache_invalidate ();
495 (*current_target.to_load) (arg, from_tty);
496 }
497
498 void
499 target_create_inferior (char *exec_file, char *args,
500 char **env, int from_tty)
501 {
502 struct target_ops *t;
503
504 for (t = current_target.beneath; t != NULL; t = t->beneath)
505 {
506 if (t->to_create_inferior != NULL)
507 {
508 t->to_create_inferior (t, exec_file, args, env, from_tty);
509 if (targetdebug)
510 fprintf_unfiltered (gdb_stdlog,
511 "target_create_inferior (%s, %s, xxx, %d)\n",
512 exec_file, args, from_tty);
513 return;
514 }
515 }
516
517 internal_error (__FILE__, __LINE__,
518 _("could not find a target to create inferior"));
519 }
520
521 void
522 target_terminal_inferior (void)
523 {
524 /* A background resume (``run&'') should leave GDB in control of the
525 terminal. Use target_can_async_p, not target_is_async_p, since at
526 this point the target is not async yet. However, if sync_execution
527 is not set, we know it will become async prior to resume. */
528 if (target_can_async_p () && !sync_execution)
529 return;
530
531 /* If GDB is resuming the inferior in the foreground, install
532 inferior's terminal modes. */
533 (*current_target.to_terminal_inferior) ();
534 }
535
536 static int
537 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
538 struct target_ops *t)
539 {
540 errno = EIO; /* Can't read/write this location. */
541 return 0; /* No bytes handled. */
542 }
543
544 static void
545 tcomplain (void)
546 {
547 error (_("You can't do that when your target is `%s'"),
548 current_target.to_shortname);
549 }
550
551 void
552 noprocess (void)
553 {
554 error (_("You can't do that without a process to debug."));
555 }
556
557 static void
558 default_terminal_info (const char *args, int from_tty)
559 {
560 printf_unfiltered (_("No saved terminal information.\n"));
561 }
562
563 /* A default implementation for the to_get_ada_task_ptid target method.
564
565 This function builds the PTID by using both LWP and TID as part of
566 the PTID lwp and tid elements. The pid used is the pid of the
567 inferior_ptid. */
568
569 static ptid_t
570 default_get_ada_task_ptid (long lwp, long tid)
571 {
572 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
573 }
574
575 static enum exec_direction_kind
576 default_execution_direction (void)
577 {
578 if (!target_can_execute_reverse)
579 return EXEC_FORWARD;
580 else if (!target_can_async_p ())
581 return EXEC_FORWARD;
582 else
583 gdb_assert_not_reached ("\
584 to_execution_direction must be implemented for reverse async");
585 }
586
587 /* Go through the target stack from top to bottom, copying over zero
588 entries in current_target, then filling in still empty entries. In
589 effect, we are doing class inheritance through the pushed target
590 vectors.
591
592 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
593 is currently implemented, is that it discards any knowledge of
594 which target an inherited method originally belonged to.
595 Consequently, new target methods should instead explicitly and
596 locally search the target stack for the target that can handle the
597 request. */
598
599 static void
600 update_current_target (void)
601 {
602 struct target_ops *t;
603
604 /* First, reset current's contents. */
605 memset (&current_target, 0, sizeof (current_target));
606
607 #define INHERIT(FIELD, TARGET) \
608 if (!current_target.FIELD) \
609 current_target.FIELD = (TARGET)->FIELD
610
611 for (t = target_stack; t; t = t->beneath)
612 {
613 INHERIT (to_shortname, t);
614 INHERIT (to_longname, t);
615 INHERIT (to_doc, t);
616 /* Do not inherit to_open. */
617 /* Do not inherit to_close. */
618 /* Do not inherit to_attach. */
619 INHERIT (to_post_attach, t);
620 INHERIT (to_attach_no_wait, t);
621 /* Do not inherit to_detach. */
622 /* Do not inherit to_disconnect. */
623 /* Do not inherit to_resume. */
624 /* Do not inherit to_wait. */
625 /* Do not inherit to_fetch_registers. */
626 /* Do not inherit to_store_registers. */
627 INHERIT (to_prepare_to_store, t);
628 INHERIT (deprecated_xfer_memory, t);
629 INHERIT (to_files_info, t);
630 INHERIT (to_insert_breakpoint, t);
631 INHERIT (to_remove_breakpoint, t);
632 INHERIT (to_can_use_hw_breakpoint, t);
633 INHERIT (to_insert_hw_breakpoint, t);
634 INHERIT (to_remove_hw_breakpoint, t);
635 /* Do not inherit to_ranged_break_num_registers. */
636 INHERIT (to_insert_watchpoint, t);
637 INHERIT (to_remove_watchpoint, t);
638 /* Do not inherit to_insert_mask_watchpoint. */
639 /* Do not inherit to_remove_mask_watchpoint. */
640 INHERIT (to_stopped_data_address, t);
641 INHERIT (to_have_steppable_watchpoint, t);
642 INHERIT (to_have_continuable_watchpoint, t);
643 INHERIT (to_stopped_by_watchpoint, t);
644 INHERIT (to_watchpoint_addr_within_range, t);
645 INHERIT (to_region_ok_for_hw_watchpoint, t);
646 INHERIT (to_can_accel_watchpoint_condition, t);
647 /* Do not inherit to_masked_watch_num_registers. */
648 INHERIT (to_terminal_init, t);
649 INHERIT (to_terminal_inferior, t);
650 INHERIT (to_terminal_ours_for_output, t);
651 INHERIT (to_terminal_ours, t);
652 INHERIT (to_terminal_save_ours, t);
653 INHERIT (to_terminal_info, t);
654 /* Do not inherit to_kill. */
655 INHERIT (to_load, t);
656 /* Do not inherit to_create_inferior. */
657 INHERIT (to_post_startup_inferior, t);
658 INHERIT (to_insert_fork_catchpoint, t);
659 INHERIT (to_remove_fork_catchpoint, t);
660 INHERIT (to_insert_vfork_catchpoint, t);
661 INHERIT (to_remove_vfork_catchpoint, t);
662 /* Do not inherit to_follow_fork. */
663 INHERIT (to_insert_exec_catchpoint, t);
664 INHERIT (to_remove_exec_catchpoint, t);
665 INHERIT (to_set_syscall_catchpoint, t);
666 INHERIT (to_has_exited, t);
667 /* Do not inherit to_mourn_inferior. */
668 INHERIT (to_can_run, t);
669 /* Do not inherit to_pass_signals. */
670 /* Do not inherit to_program_signals. */
671 /* Do not inherit to_thread_alive. */
672 /* Do not inherit to_find_new_threads. */
673 /* Do not inherit to_pid_to_str. */
674 INHERIT (to_extra_thread_info, t);
675 INHERIT (to_thread_name, t);
676 INHERIT (to_stop, t);
677 /* Do not inherit to_xfer_partial. */
678 INHERIT (to_rcmd, t);
679 INHERIT (to_pid_to_exec_file, t);
680 INHERIT (to_log_command, t);
681 INHERIT (to_stratum, t);
682 /* Do not inherit to_has_all_memory. */
683 /* Do not inherit to_has_memory. */
684 /* Do not inherit to_has_stack. */
685 /* Do not inherit to_has_registers. */
686 /* Do not inherit to_has_execution. */
687 INHERIT (to_has_thread_control, t);
688 INHERIT (to_can_async_p, t);
689 INHERIT (to_is_async_p, t);
690 INHERIT (to_async, t);
691 INHERIT (to_find_memory_regions, t);
692 INHERIT (to_make_corefile_notes, t);
693 INHERIT (to_get_bookmark, t);
694 INHERIT (to_goto_bookmark, t);
695 /* Do not inherit to_get_thread_local_address. */
696 INHERIT (to_can_execute_reverse, t);
697 INHERIT (to_execution_direction, t);
698 INHERIT (to_thread_architecture, t);
699 /* Do not inherit to_read_description. */
700 INHERIT (to_get_ada_task_ptid, t);
701 /* Do not inherit to_search_memory. */
702 INHERIT (to_supports_multi_process, t);
703 INHERIT (to_supports_enable_disable_tracepoint, t);
704 INHERIT (to_supports_string_tracing, t);
705 INHERIT (to_trace_init, t);
706 INHERIT (to_download_tracepoint, t);
707 INHERIT (to_can_download_tracepoint, t);
708 INHERIT (to_download_trace_state_variable, t);
709 INHERIT (to_enable_tracepoint, t);
710 INHERIT (to_disable_tracepoint, t);
711 INHERIT (to_trace_set_readonly_regions, t);
712 INHERIT (to_trace_start, t);
713 INHERIT (to_get_trace_status, t);
714 INHERIT (to_get_tracepoint_status, t);
715 INHERIT (to_trace_stop, t);
716 INHERIT (to_trace_find, t);
717 INHERIT (to_get_trace_state_variable_value, t);
718 INHERIT (to_save_trace_data, t);
719 INHERIT (to_upload_tracepoints, t);
720 INHERIT (to_upload_trace_state_variables, t);
721 INHERIT (to_get_raw_trace_data, t);
722 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
723 INHERIT (to_set_disconnected_tracing, t);
724 INHERIT (to_set_circular_trace_buffer, t);
725 INHERIT (to_set_trace_buffer_size, t);
726 INHERIT (to_set_trace_notes, t);
727 INHERIT (to_get_tib_address, t);
728 INHERIT (to_set_permissions, t);
729 INHERIT (to_static_tracepoint_marker_at, t);
730 INHERIT (to_static_tracepoint_markers_by_strid, t);
731 INHERIT (to_traceframe_info, t);
732 INHERIT (to_use_agent, t);
733 INHERIT (to_can_use_agent, t);
734 INHERIT (to_magic, t);
735 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
736 INHERIT (to_can_run_breakpoint_commands, t);
737 /* Do not inherit to_memory_map. */
738 /* Do not inherit to_flash_erase. */
739 /* Do not inherit to_flash_done. */
740 }
741 #undef INHERIT
742
743 /* Clean up a target struct so it no longer has any zero pointers in
744 it. Some entries are defaulted to a method that prints an error,
745 others are hard-wired to a standard recursive default. */
746
747 #define de_fault(field, value) \
748 if (!current_target.field) \
749 current_target.field = value
750
751 de_fault (to_open,
752 (void (*) (char *, int))
753 tcomplain);
754 de_fault (to_close,
755 (void (*) (void))
756 target_ignore);
757 de_fault (to_post_attach,
758 (void (*) (int))
759 target_ignore);
760 de_fault (to_prepare_to_store,
761 (void (*) (struct regcache *))
762 noprocess);
763 de_fault (deprecated_xfer_memory,
764 (int (*) (CORE_ADDR, gdb_byte *, int, int,
765 struct mem_attrib *, struct target_ops *))
766 nomemory);
767 de_fault (to_files_info,
768 (void (*) (struct target_ops *))
769 target_ignore);
770 de_fault (to_insert_breakpoint,
771 memory_insert_breakpoint);
772 de_fault (to_remove_breakpoint,
773 memory_remove_breakpoint);
774 de_fault (to_can_use_hw_breakpoint,
775 (int (*) (int, int, int))
776 return_zero);
777 de_fault (to_insert_hw_breakpoint,
778 (int (*) (struct gdbarch *, struct bp_target_info *))
779 return_minus_one);
780 de_fault (to_remove_hw_breakpoint,
781 (int (*) (struct gdbarch *, struct bp_target_info *))
782 return_minus_one);
783 de_fault (to_insert_watchpoint,
784 (int (*) (CORE_ADDR, int, int, struct expression *))
785 return_minus_one);
786 de_fault (to_remove_watchpoint,
787 (int (*) (CORE_ADDR, int, int, struct expression *))
788 return_minus_one);
789 de_fault (to_stopped_by_watchpoint,
790 (int (*) (void))
791 return_zero);
792 de_fault (to_stopped_data_address,
793 (int (*) (struct target_ops *, CORE_ADDR *))
794 return_zero);
795 de_fault (to_watchpoint_addr_within_range,
796 default_watchpoint_addr_within_range);
797 de_fault (to_region_ok_for_hw_watchpoint,
798 default_region_ok_for_hw_watchpoint);
799 de_fault (to_can_accel_watchpoint_condition,
800 (int (*) (CORE_ADDR, int, int, struct expression *))
801 return_zero);
802 de_fault (to_terminal_init,
803 (void (*) (void))
804 target_ignore);
805 de_fault (to_terminal_inferior,
806 (void (*) (void))
807 target_ignore);
808 de_fault (to_terminal_ours_for_output,
809 (void (*) (void))
810 target_ignore);
811 de_fault (to_terminal_ours,
812 (void (*) (void))
813 target_ignore);
814 de_fault (to_terminal_save_ours,
815 (void (*) (void))
816 target_ignore);
817 de_fault (to_terminal_info,
818 default_terminal_info);
819 de_fault (to_load,
820 (void (*) (char *, int))
821 tcomplain);
822 de_fault (to_post_startup_inferior,
823 (void (*) (ptid_t))
824 target_ignore);
825 de_fault (to_insert_fork_catchpoint,
826 (int (*) (int))
827 return_one);
828 de_fault (to_remove_fork_catchpoint,
829 (int (*) (int))
830 return_one);
831 de_fault (to_insert_vfork_catchpoint,
832 (int (*) (int))
833 return_one);
834 de_fault (to_remove_vfork_catchpoint,
835 (int (*) (int))
836 return_one);
837 de_fault (to_insert_exec_catchpoint,
838 (int (*) (int))
839 return_one);
840 de_fault (to_remove_exec_catchpoint,
841 (int (*) (int))
842 return_one);
843 de_fault (to_set_syscall_catchpoint,
844 (int (*) (int, int, int, int, int *))
845 return_one);
846 de_fault (to_has_exited,
847 (int (*) (int, int, int *))
848 return_zero);
849 de_fault (to_can_run,
850 return_zero);
851 de_fault (to_extra_thread_info,
852 (char *(*) (struct thread_info *))
853 return_zero);
854 de_fault (to_thread_name,
855 (char *(*) (struct thread_info *))
856 return_zero);
857 de_fault (to_stop,
858 (void (*) (ptid_t))
859 target_ignore);
860 current_target.to_xfer_partial = current_xfer_partial;
861 de_fault (to_rcmd,
862 (void (*) (char *, struct ui_file *))
863 tcomplain);
864 de_fault (to_pid_to_exec_file,
865 (char *(*) (int))
866 return_zero);
867 de_fault (to_async,
868 (void (*) (void (*) (enum inferior_event_type, void*), void*))
869 tcomplain);
870 de_fault (to_thread_architecture,
871 default_thread_architecture);
872 current_target.to_read_description = NULL;
873 de_fault (to_get_ada_task_ptid,
874 (ptid_t (*) (long, long))
875 default_get_ada_task_ptid);
876 de_fault (to_supports_multi_process,
877 (int (*) (void))
878 return_zero);
879 de_fault (to_supports_enable_disable_tracepoint,
880 (int (*) (void))
881 return_zero);
882 de_fault (to_supports_string_tracing,
883 (int (*) (void))
884 return_zero);
885 de_fault (to_trace_init,
886 (void (*) (void))
887 tcomplain);
888 de_fault (to_download_tracepoint,
889 (void (*) (struct bp_location *))
890 tcomplain);
891 de_fault (to_can_download_tracepoint,
892 (int (*) (void))
893 return_zero);
894 de_fault (to_download_trace_state_variable,
895 (void (*) (struct trace_state_variable *))
896 tcomplain);
897 de_fault (to_enable_tracepoint,
898 (void (*) (struct bp_location *))
899 tcomplain);
900 de_fault (to_disable_tracepoint,
901 (void (*) (struct bp_location *))
902 tcomplain);
903 de_fault (to_trace_set_readonly_regions,
904 (void (*) (void))
905 tcomplain);
906 de_fault (to_trace_start,
907 (void (*) (void))
908 tcomplain);
909 de_fault (to_get_trace_status,
910 (int (*) (struct trace_status *))
911 return_minus_one);
912 de_fault (to_get_tracepoint_status,
913 (void (*) (struct breakpoint *, struct uploaded_tp *))
914 tcomplain);
915 de_fault (to_trace_stop,
916 (void (*) (void))
917 tcomplain);
918 de_fault (to_trace_find,
919 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
920 return_minus_one);
921 de_fault (to_get_trace_state_variable_value,
922 (int (*) (int, LONGEST *))
923 return_zero);
924 de_fault (to_save_trace_data,
925 (int (*) (const char *))
926 tcomplain);
927 de_fault (to_upload_tracepoints,
928 (int (*) (struct uploaded_tp **))
929 return_zero);
930 de_fault (to_upload_trace_state_variables,
931 (int (*) (struct uploaded_tsv **))
932 return_zero);
933 de_fault (to_get_raw_trace_data,
934 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
935 tcomplain);
936 de_fault (to_get_min_fast_tracepoint_insn_len,
937 (int (*) (void))
938 return_minus_one);
939 de_fault (to_set_disconnected_tracing,
940 (void (*) (int))
941 target_ignore);
942 de_fault (to_set_circular_trace_buffer,
943 (void (*) (int))
944 target_ignore);
945 de_fault (to_set_trace_buffer_size,
946 (void (*) (LONGEST))
947 target_ignore);
948 de_fault (to_set_trace_notes,
949 (int (*) (const char *, const char *, const char *))
950 return_zero);
951 de_fault (to_get_tib_address,
952 (int (*) (ptid_t, CORE_ADDR *))
953 tcomplain);
954 de_fault (to_set_permissions,
955 (void (*) (void))
956 target_ignore);
957 de_fault (to_static_tracepoint_marker_at,
958 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
959 return_zero);
960 de_fault (to_static_tracepoint_markers_by_strid,
961 (VEC(static_tracepoint_marker_p) * (*) (const char *))
962 tcomplain);
963 de_fault (to_traceframe_info,
964 (struct traceframe_info * (*) (void))
965 tcomplain);
966 de_fault (to_supports_evaluation_of_breakpoint_conditions,
967 (int (*) (void))
968 return_zero);
969 de_fault (to_can_run_breakpoint_commands,
970 (int (*) (void))
971 return_zero);
972 de_fault (to_use_agent,
973 (int (*) (int))
974 tcomplain);
975 de_fault (to_can_use_agent,
976 (int (*) (void))
977 return_zero);
978 de_fault (to_execution_direction, default_execution_direction);
979
980 #undef de_fault
981
982 /* Finally, position the target-stack beneath the squashed
983 "current_target". That way code looking for a non-inherited
984 target method can quickly and simply find it. */
985 current_target.beneath = target_stack;
986
987 if (targetdebug)
988 setup_target_debug ();
989 }
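/* Illustrative sketch, not part of the original target.c: the pattern the
   NOTE above recommends for new target methods -- search the stack
   explicitly rather than relying on INHERIT.  "to_frobnicate" is a
   hypothetical method; compare target_kill above for a real instance.  */
#if 0
void
target_frobnicate (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_frobnicate != NULL)
      {
	t->to_frobnicate (t);
	return;
      }

  tcomplain ();
}
#endif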
990
991 /* Push a new target type into the stack of the existing target accessors,
992 possibly superseding some of the existing accessors.
993
994 Rather than allow an empty stack, we always have the dummy target at
995 the bottom stratum, so we can call the function vectors without
996 checking them. */
997
998 void
999 push_target (struct target_ops *t)
1000 {
1001 struct target_ops **cur;
1002
1003 /* Check magic number. If wrong, it probably means someone changed
1004 the struct definition, but not all the places that initialize one. */
1005 if (t->to_magic != OPS_MAGIC)
1006 {
1007 fprintf_unfiltered (gdb_stderr,
1008 "Magic number of %s target struct wrong\n",
1009 t->to_shortname);
1010 internal_error (__FILE__, __LINE__,
1011 _("failed internal consistency check"));
1012 }
1013
1014 /* Find the proper stratum to install this target in. */
1015 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1016 {
1017 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1018 break;
1019 }
1020
1021 /* If there are already targets at this stratum, remove them. */
1022 /* FIXME: cagney/2003-10-15: I think this should be popping all
1023 targets to CUR, and not just those at this stratum level. */
1024 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1025 {
1026 /* There's already something at this stratum level. Close it,
1027 and un-hook it from the stack. */
1028 struct target_ops *tmp = (*cur);
1029
1030 (*cur) = (*cur)->beneath;
1031 tmp->beneath = NULL;
1032 target_close (tmp);
1033 }
1034
1035 /* We have removed all targets in our stratum, now add the new one. */
1036 t->beneath = (*cur);
1037 (*cur) = t;
1038
1039 update_current_target ();
1040 }
1041
1042 /* Remove a target_ops vector from the stack, wherever it may be.
1043 Return how many times it was removed (0 or 1). */
1044
1045 int
1046 unpush_target (struct target_ops *t)
1047 {
1048 struct target_ops **cur;
1049 struct target_ops *tmp;
1050
1051 if (t->to_stratum == dummy_stratum)
1052 internal_error (__FILE__, __LINE__,
1053 _("Attempt to unpush the dummy target"));
1054
1055 /* Look for the specified target. Note that we assume that a target
1056 can only occur once in the target stack. */
1057
1058 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1059 {
1060 if ((*cur) == t)
1061 break;
1062 }
1063
1064 /* If we don't find target_ops, quit. Only open targets should be
1065 closed. */
1066 if ((*cur) == NULL)
1067 return 0;
1068
1069 /* Unchain the target. */
1070 tmp = (*cur);
1071 (*cur) = (*cur)->beneath;
1072 tmp->beneath = NULL;
1073
1074 update_current_target ();
1075
1076 /* Finally close the target. Note we do this after unchaining, so
1077 any target method calls from within the target_close
1078 implementation don't end up in T anymore. */
1079 target_close (t);
1080
1081 return 1;
1082 }
1083
1084 void
1085 pop_target (void)
1086 {
1087 target_close (target_stack); /* Let it clean up. */
1088 if (unpush_target (target_stack) == 1)
1089 return;
1090
1091 fprintf_unfiltered (gdb_stderr,
1092 "pop_target couldn't find target %s\n",
1093 current_target.to_shortname);
1094 internal_error (__FILE__, __LINE__,
1095 _("failed internal consistency check"));
1096 }
1097
1098 void
1099 pop_all_targets_above (enum strata above_stratum)
1100 {
1101 while ((int) (current_target.to_stratum) > (int) above_stratum)
1102 {
1103 target_close (target_stack);
1104 if (!unpush_target (target_stack))
1105 {
1106 fprintf_unfiltered (gdb_stderr,
1107 "pop_all_targets couldn't find target %s\n",
1108 target_stack->to_shortname);
1109 internal_error (__FILE__, __LINE__,
1110 _("failed internal consistency check"));
1111 break;
1112 }
1113 }
1114 }
1115
1116 void
1117 pop_all_targets (void)
1118 {
1119 pop_all_targets_above (dummy_stratum);
1120 }
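/* Illustrative sketch, not part of the original target.c: with a stack of
   a remote target (process_stratum) over "exec" (file_stratum) over the
   dummy target, this call drops the remote target but keeps the two
   strata below it.  */
#if 0
  pop_all_targets_above (file_stratum);
#endif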
1121
1122 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1123
1124 int
1125 target_is_pushed (struct target_ops *t)
1126 {
1127 struct target_ops **cur;
1128
1129 /* Check magic number. If wrong, it probably means someone changed
1130 the struct definition, but not all the places that initialize one. */
1131 if (t->to_magic != OPS_MAGIC)
1132 {
1133 fprintf_unfiltered (gdb_stderr,
1134 "Magic number of %s target struct wrong\n",
1135 t->to_shortname);
1136 internal_error (__FILE__, __LINE__,
1137 _("failed internal consistency check"));
1138 }
1139
1140 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1141 if (*cur == t)
1142 return 1;
1143
1144 return 0;
1145 }
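/* Illustrative sketch, not part of the original target.c: a backend whose
   open routine may run while it is already connected can use
   target_is_pushed to avoid pushing its vector twice.  frobnitz_ops is
   hypothetical.  */
#if 0
  if (!target_is_pushed (&frobnitz_ops))
    push_target (&frobnitz_ops);
#endif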
1146
1147 /* Using the objfile specified in OBJFILE, find the address for the
1148 current thread's thread-local storage with offset OFFSET. */
1149 CORE_ADDR
1150 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1151 {
1152 volatile CORE_ADDR addr = 0;
1153 struct target_ops *target;
1154
1155 for (target = current_target.beneath;
1156 target != NULL;
1157 target = target->beneath)
1158 {
1159 if (target->to_get_thread_local_address != NULL)
1160 break;
1161 }
1162
1163 if (target != NULL
1164 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1165 {
1166 ptid_t ptid = inferior_ptid;
1167 volatile struct gdb_exception ex;
1168
1169 TRY_CATCH (ex, RETURN_MASK_ALL)
1170 {
1171 CORE_ADDR lm_addr;
1172
1173 /* Fetch the load module address for this objfile. */
1174 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1175 objfile);
1176 /* If it's 0, throw the appropriate exception. */
1177 if (lm_addr == 0)
1178 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1179 _("TLS load module not found"));
1180
1181 addr = target->to_get_thread_local_address (target, ptid,
1182 lm_addr, offset);
1183 }
1184 /* If an error occurred, print TLS related messages here. Otherwise,
1185 throw the error to some higher catcher. */
1186 if (ex.reason < 0)
1187 {
1188 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1189
1190 switch (ex.error)
1191 {
1192 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1193 error (_("Cannot find thread-local variables "
1194 "in this thread library."));
1195 break;
1196 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1197 if (objfile_is_library)
1198 error (_("Cannot find shared library `%s' in dynamic"
1199 " linker's load module list"), objfile->name);
1200 else
1201 error (_("Cannot find executable file `%s' in dynamic"
1202 " linker's load module list"), objfile->name);
1203 break;
1204 case TLS_NOT_ALLOCATED_YET_ERROR:
1205 if (objfile_is_library)
1206 error (_("The inferior has not yet allocated storage for"
1207 " thread-local variables in\n"
1208 "the shared library `%s'\n"
1209 "for %s"),
1210 objfile->name, target_pid_to_str (ptid));
1211 else
1212 error (_("The inferior has not yet allocated storage for"
1213 " thread-local variables in\n"
1214 "the executable `%s'\n"
1215 "for %s"),
1216 objfile->name, target_pid_to_str (ptid));
1217 break;
1218 case TLS_GENERIC_ERROR:
1219 if (objfile_is_library)
1220 error (_("Cannot find thread-local storage for %s, "
1221 "shared library %s:\n%s"),
1222 target_pid_to_str (ptid),
1223 objfile->name, ex.message);
1224 else
1225 error (_("Cannot find thread-local storage for %s, "
1226 "executable file %s:\n%s"),
1227 target_pid_to_str (ptid),
1228 objfile->name, ex.message);
1229 break;
1230 default:
1231 throw_exception (ex);
1232 break;
1233 }
1234 }
1235 }
1236 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1237 TLS is an ABI-specific thing. But we don't do that yet. */
1238 else
1239 error (_("Cannot find thread-local variables on this target"));
1240
1241 return addr;
1242 }
1243
1244 #undef MIN
1245 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1246
1247 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1248 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1249 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1250 is responsible for freeing it. Return the number of bytes successfully
1251 read. */
1252
1253 int
1254 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1255 {
1256 int tlen, offset, i;
1257 gdb_byte buf[4];
1258 int errcode = 0;
1259 char *buffer;
1260 int buffer_allocated;
1261 char *bufptr;
1262 unsigned int nbytes_read = 0;
1263
1264 gdb_assert (string);
1265
1266 /* Small for testing. */
1267 buffer_allocated = 4;
1268 buffer = xmalloc (buffer_allocated);
1269 bufptr = buffer;
1270
1271 while (len > 0)
1272 {
1273 tlen = MIN (len, 4 - (memaddr & 3));
1274 offset = memaddr & 3;
1275
1276 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1277 if (errcode != 0)
1278 {
1279 /* The transfer request might have crossed the boundary to an
1280 unallocated region of memory. Retry the transfer, requesting
1281 a single byte. */
1282 tlen = 1;
1283 offset = 0;
1284 errcode = target_read_memory (memaddr, buf, 1);
1285 if (errcode != 0)
1286 goto done;
1287 }
1288
1289 if (bufptr - buffer + tlen > buffer_allocated)
1290 {
1291 unsigned int bytes;
1292
1293 bytes = bufptr - buffer;
1294 buffer_allocated *= 2;
1295 buffer = xrealloc (buffer, buffer_allocated);
1296 bufptr = buffer + bytes;
1297 }
1298
1299 for (i = 0; i < tlen; i++)
1300 {
1301 *bufptr++ = buf[i + offset];
1302 if (buf[i + offset] == '\000')
1303 {
1304 nbytes_read += i + 1;
1305 goto done;
1306 }
1307 }
1308
1309 memaddr += tlen;
1310 len -= tlen;
1311 nbytes_read += tlen;
1312 }
1313 done:
1314 *string = buffer;
1315 if (errnop != NULL)
1316 *errnop = errcode;
1317 return nbytes_read;
1318 }
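/* Illustrative sketch, not part of the original target.c: a typical
   caller.  ADDR is a hypothetical inferior address of a NUL-terminated
   string; the buffer is malloc'd even on error, so it must be freed.  */
#if 0
  char *str;
  int errcode;
  int nread = target_read_string (addr, &str, 200, &errcode);

  if (errcode == 0 && nread > 0)
    printf_filtered ("%.*s\n", nread, str);
  xfree (str);
#endif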
1319
1320 struct target_section_table *
1321 target_get_section_table (struct target_ops *target)
1322 {
1323 struct target_ops *t;
1324
1325 if (targetdebug)
1326 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1327
1328 for (t = target; t != NULL; t = t->beneath)
1329 if (t->to_get_section_table != NULL)
1330 return (*t->to_get_section_table) (t);
1331
1332 return NULL;
1333 }
1334
1335 /* Find a section containing ADDR. */
1336
1337 struct target_section *
1338 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1339 {
1340 struct target_section_table *table = target_get_section_table (target);
1341 struct target_section *secp;
1342
1343 if (table == NULL)
1344 return NULL;
1345
1346 for (secp = table->sections; secp < table->sections_end; secp++)
1347 {
1348 if (addr >= secp->addr && addr < secp->endaddr)
1349 return secp;
1350 }
1351 return NULL;
1352 }
1353
1354 /* Read memory from the live target, even if currently inspecting a
1355 traceframe. The return is the same as that of target_read. */
1356
1357 static LONGEST
1358 target_read_live_memory (enum target_object object,
1359 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1360 {
1361 int ret;
1362 struct cleanup *cleanup;
1363
1364 /* Switch momentarily out of tfind mode so to access live memory.
1365 Note that this must not clear global state, such as the frame
1366 cache, which must still remain valid for the previous traceframe.
1367 We may be _building_ the frame cache at this point. */
1368 cleanup = make_cleanup_restore_traceframe_number ();
1369 set_traceframe_number (-1);
1370
1371 ret = target_read (current_target.beneath, object, NULL,
1372 myaddr, memaddr, len);
1373
1374 do_cleanups (cleanup);
1375 return ret;
1376 }
1377
1378 /* Using the set of read-only target sections of OPS, read live
1379 read-only memory. Note that the actual reads start from the
1380 top-most target again.
1381
1382 For interface/parameters/return description see target.h,
1383 to_xfer_partial. */
1384
1385 static LONGEST
1386 memory_xfer_live_readonly_partial (struct target_ops *ops,
1387 enum target_object object,
1388 gdb_byte *readbuf, ULONGEST memaddr,
1389 LONGEST len)
1390 {
1391 struct target_section *secp;
1392 struct target_section_table *table;
1393
1394 secp = target_section_by_addr (ops, memaddr);
1395 if (secp != NULL
1396 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1397 & SEC_READONLY))
1398 {
1399 struct target_section *p;
1400 ULONGEST memend = memaddr + len;
1401
1402 table = target_get_section_table (ops);
1403
1404 for (p = table->sections; p < table->sections_end; p++)
1405 {
1406 if (memaddr >= p->addr)
1407 {
1408 if (memend <= p->endaddr)
1409 {
1410 /* Entire transfer is within this section. */
1411 return target_read_live_memory (object, memaddr,
1412 readbuf, len);
1413 }
1414 else if (memaddr >= p->endaddr)
1415 {
1416 /* This section ends before the transfer starts. */
1417 continue;
1418 }
1419 else
1420 {
1421 /* This section overlaps the transfer. Just do half. */
1422 len = p->endaddr - memaddr;
1423 return target_read_live_memory (object, memaddr,
1424 readbuf, len);
1425 }
1426 }
1427 }
1428 }
1429
1430 return 0;
1431 }
1432
1433 /* Perform a partial memory transfer.
1434 For docs see target.h, to_xfer_partial. */
1435
1436 static LONGEST
1437 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1438 void *readbuf, const void *writebuf, ULONGEST memaddr,
1439 LONGEST len)
1440 {
1441 LONGEST res;
1442 int reg_len;
1443 struct mem_region *region;
1444 struct inferior *inf;
1445
1446 /* For accesses to unmapped overlay sections, read directly from
1447 files. Must do this first, as MEMADDR may need adjustment. */
1448 if (readbuf != NULL && overlay_debugging)
1449 {
1450 struct obj_section *section = find_pc_overlay (memaddr);
1451
1452 if (pc_in_unmapped_range (memaddr, section))
1453 {
1454 struct target_section_table *table
1455 = target_get_section_table (ops);
1456 const char *section_name = section->the_bfd_section->name;
1457
1458 memaddr = overlay_mapped_address (memaddr, section);
1459 return section_table_xfer_memory_partial (readbuf, writebuf,
1460 memaddr, len,
1461 table->sections,
1462 table->sections_end,
1463 section_name);
1464 }
1465 }
1466
1467 /* Try the executable files, if "trust-readonly-sections" is set. */
1468 if (readbuf != NULL && trust_readonly)
1469 {
1470 struct target_section *secp;
1471 struct target_section_table *table;
1472
1473 secp = target_section_by_addr (ops, memaddr);
1474 if (secp != NULL
1475 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1476 & SEC_READONLY))
1477 {
1478 table = target_get_section_table (ops);
1479 return section_table_xfer_memory_partial (readbuf, writebuf,
1480 memaddr, len,
1481 table->sections,
1482 table->sections_end,
1483 NULL);
1484 }
1485 }
1486
1487 /* If reading unavailable memory in the context of traceframes, and
1488 this address falls within a read-only section, fall back to
1489 reading from live memory. */
1490 if (readbuf != NULL && get_traceframe_number () != -1)
1491 {
1492 VEC(mem_range_s) *available;
1493
1494 /* If we fail to get the set of available memory, then the
1495 target does not support querying traceframe info, and so we
1496 attempt reading from the traceframe anyway (assuming the
1497 target implements the old QTro packet then). */
1498 if (traceframe_available_memory (&available, memaddr, len))
1499 {
1500 struct cleanup *old_chain;
1501
1502 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1503
1504 if (VEC_empty (mem_range_s, available)
1505 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1506 {
1507 /* Don't read into the traceframe's available
1508 memory. */
1509 if (!VEC_empty (mem_range_s, available))
1510 {
1511 LONGEST oldlen = len;
1512
1513 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1514 gdb_assert (len <= oldlen);
1515 }
1516
1517 do_cleanups (old_chain);
1518
1519 /* This goes through the topmost target again. */
1520 res = memory_xfer_live_readonly_partial (ops, object,
1521 readbuf, memaddr, len);
1522 if (res > 0)
1523 return res;
1524
1525 /* No use trying further, we know some memory starting
1526 at MEMADDR isn't available. */
1527 return -1;
1528 }
1529
1530 /* Don't try to read more than how much is available, in
1531 case the target implements the deprecated QTro packet to
1532 cater for older GDBs (the target's knowledge of read-only
1533 sections may be outdated by now). */
1534 len = VEC_index (mem_range_s, available, 0)->length;
1535
1536 do_cleanups (old_chain);
1537 }
1538 }
1539
1540 /* Try GDB's internal data cache. */
1541 region = lookup_mem_region (memaddr);
1542 /* region->hi == 0 means there's no upper bound. */
1543 if (memaddr + len < region->hi || region->hi == 0)
1544 reg_len = len;
1545 else
1546 reg_len = region->hi - memaddr;
1547
1548 switch (region->attrib.mode)
1549 {
1550 case MEM_RO:
1551 if (writebuf != NULL)
1552 return -1;
1553 break;
1554
1555 case MEM_WO:
1556 if (readbuf != NULL)
1557 return -1;
1558 break;
1559
1560 case MEM_FLASH:
1561 /* We only support writing to flash during "load" for now. */
1562 if (writebuf != NULL)
1563 error (_("Writing to flash memory forbidden in this context"));
1564 break;
1565
1566 case MEM_NONE:
1567 return -1;
1568 }
1569
1570 if (!ptid_equal (inferior_ptid, null_ptid))
1571 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1572 else
1573 inf = NULL;
1574
1575 if (inf != NULL
1576 /* The dcache reads whole cache lines; that doesn't play well
1577 with reading from a trace buffer, because reading outside of
1578 the collected memory range fails. */
1579 && get_traceframe_number () == -1
1580 && (region->attrib.cache
1581 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1582 {
1583 if (readbuf != NULL)
1584 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1585 reg_len, 0);
1586 else
1587 /* FIXME drow/2006-08-09: If we're going to preserve const
1588 correctness dcache_xfer_memory should take readbuf and
1589 writebuf. */
1590 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1591 (void *) writebuf,
1592 reg_len, 1);
1593 if (res <= 0)
1594 return -1;
1595 else
1596 return res;
1597 }
1598
1599 /* If none of those methods found the memory we wanted, fall back
1600 to a target partial transfer. Normally a single call to
1601 to_xfer_partial is enough; if it doesn't recognize an object
1602 it will call the to_xfer_partial of the next target down.
1603 But for memory this won't do. Memory is the only target
1604 object which can be read from more than one valid target.
1605 A core file, for instance, could have some of memory but
1606 delegate other bits to the target below it. So, we must
1607 manually try all targets. */
1608
1609 do
1610 {
1611 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1612 readbuf, writebuf, memaddr, reg_len);
1613 if (res > 0)
1614 break;
1615
1616 /* We want to continue past core files to executables, but not
1617 past a running target's memory. */
1618 if (ops->to_has_all_memory (ops))
1619 break;
1620
1621 ops = ops->beneath;
1622 }
1623 while (ops != NULL);
1624
1625 /* Make sure the cache gets updated no matter what - if we are writing
1626 to the stack. Even if this write is not tagged as such, we still need
1627 to update the cache. */
1628
1629 if (res > 0
1630 && inf != NULL
1631 && writebuf != NULL
1632 && !region->attrib.cache
1633 && stack_cache_enabled_p
1634 && object != TARGET_OBJECT_STACK_MEMORY)
1635 {
1636 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1637 }
1638
1639 /* If we still haven't got anything, return the last error. We
1640 give up. */
1641 return res;
1642 }
1643
1644 /* Perform a partial memory transfer. For docs see target.h,
1645 to_xfer_partial. */
1646
1647 static LONGEST
1648 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1649 void *readbuf, const void *writebuf, ULONGEST memaddr,
1650 LONGEST len)
1651 {
1652 int res;
1653
1654 /* Zero length requests are ok and require no work. */
1655 if (len == 0)
1656 return 0;
1657
1658 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1659 breakpoint insns, thus hiding out from higher layers whether
1660 there are software breakpoints inserted in the code stream. */
1661 if (readbuf != NULL)
1662 {
1663 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1664
1665 if (res > 0 && !show_memory_breakpoints)
1666 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1667 }
1668 else
1669 {
1670 void *buf;
1671 struct cleanup *old_chain;
1672
1673 buf = xmalloc (len);
1674 old_chain = make_cleanup (xfree, buf);
1675 memcpy (buf, writebuf, len);
1676
1677 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1678 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1679
1680 do_cleanups (old_chain);
1681 }
1682
1683 return res;
1684 }
1685
1686 static void
1687 restore_show_memory_breakpoints (void *arg)
1688 {
1689 show_memory_breakpoints = (uintptr_t) arg;
1690 }
1691
1692 struct cleanup *
1693 make_show_memory_breakpoints_cleanup (int show)
1694 {
1695 int current = show_memory_breakpoints;
1696
1697 show_memory_breakpoints = show;
1698 return make_cleanup (restore_show_memory_breakpoints,
1699 (void *) (uintptr_t) current);
1700 }
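/* Illustrative sketch, not part of the original target.c: temporarily
   request the raw memory contents (including any breakpoint instructions
   GDB has inserted), restoring the previous setting via the returned
   cleanup.  MEMADDR, BUF and LEN are hypothetical.  */
#if 0
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

  target_read_memory (memaddr, buf, len);
  do_cleanups (old_chain);
#endif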
1701
1702 /* For docs see target.h, to_xfer_partial. */
1703
1704 static LONGEST
1705 target_xfer_partial (struct target_ops *ops,
1706 enum target_object object, const char *annex,
1707 void *readbuf, const void *writebuf,
1708 ULONGEST offset, LONGEST len)
1709 {
1710 LONGEST retval;
1711
1712 gdb_assert (ops->to_xfer_partial != NULL);
1713
1714 if (writebuf && !may_write_memory)
1715 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1716 core_addr_to_string_nz (offset), plongest (len));
1717
1718 /* If this is a memory transfer, let the memory-specific code
1719 have a look at it instead. Memory transfers are more
1720 complicated. */
1721 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1722 retval = memory_xfer_partial (ops, object, readbuf,
1723 writebuf, offset, len);
1724 else
1725 {
1726 enum target_object raw_object = object;
1727
1728 /* If this is a raw memory transfer, request the normal
1729 memory object from other layers. */
1730 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1731 raw_object = TARGET_OBJECT_MEMORY;
1732
1733 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1734 writebuf, offset, len);
1735 }
1736
1737 if (targetdebug)
1738 {
1739 const unsigned char *myaddr = NULL;
1740
1741 fprintf_unfiltered (gdb_stdlog,
1742 "%s:target_xfer_partial "
1743 "(%d, %s, %s, %s, %s, %s) = %s",
1744 ops->to_shortname,
1745 (int) object,
1746 (annex ? annex : "(null)"),
1747 host_address_to_string (readbuf),
1748 host_address_to_string (writebuf),
1749 core_addr_to_string_nz (offset),
1750 plongest (len), plongest (retval));
1751
1752 if (readbuf)
1753 myaddr = readbuf;
1754 if (writebuf)
1755 myaddr = writebuf;
1756 if (retval > 0 && myaddr != NULL)
1757 {
1758 int i;
1759
1760 fputs_unfiltered (", bytes =", gdb_stdlog);
1761 for (i = 0; i < retval; i++)
1762 {
1763 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1764 {
1765 if (targetdebug < 2 && i > 0)
1766 {
1767 fprintf_unfiltered (gdb_stdlog, " ...");
1768 break;
1769 }
1770 fprintf_unfiltered (gdb_stdlog, "\n");
1771 }
1772
1773 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1774 }
1775 }
1776
1777 fputc_unfiltered ('\n', gdb_stdlog);
1778 }
1779 return retval;
1780 }
1781
1782 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1783 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1784 if any error occurs.
1785
1786 If an error occurs, no guarantee is made about the contents of the data at
1787 MYADDR. In particular, the caller should not depend upon partial reads
1788 filling the buffer with good data. There is no way for the caller to know
1789 how much good data might have been transferred anyway. Callers that can
1790 deal with partial reads should call target_read (which will retry until
1791 it makes no progress, and then return how much was transferred). */
1792
1793 int
1794 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1795 {
1796 /* Dispatch to the topmost target, not the flattened current_target.
1797 Memory accesses check target->to_has_(all_)memory, and the
1798 flattened target doesn't inherit those. */
1799 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1800 myaddr, memaddr, len) == len)
1801 return 0;
1802 else
1803 return EIO;
1804 }
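/* Illustrative sketch, not part of the original target.c: an
   all-or-nothing read with error reporting.  MEMADDR is a hypothetical
   inferior address.  */
#if 0
  gdb_byte buf[8];

  if (target_read_memory (memaddr, buf, sizeof buf) != 0)
    error (_("Cannot read memory at %s"), core_addr_to_string_nz (memaddr));
#endif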
1805
1806 /* Like target_read_memory, but specify explicitly that this is a read from
1807 the target's stack. This may trigger different cache behavior. */
1808
1809 int
1810 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1811 {
1812 /* Dispatch to the topmost target, not the flattened current_target.
1813 Memory accesses check target->to_has_(all_)memory, and the
1814 flattened target doesn't inherit those. */
1815
1816 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1817 myaddr, memaddr, len) == len)
1818 return 0;
1819 else
1820 return EIO;
1821 }
1822
1823 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1824 Returns either 0 for success or an errno value if any error occurs.
1825 If an error occurs, no guarantee is made about how much data got written.
1826 Callers that can deal with partial writes should call target_write. */
1827
1828 int
1829 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1830 {
1831 /* Dispatch to the topmost target, not the flattened current_target.
1832 Memory accesses check target->to_has_(all_)memory, and the
1833 flattened target doesn't inherit those. */
1834 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1835 myaddr, memaddr, len) == len)
1836 return 0;
1837 else
1838 return EIO;
1839 }
1840
1841 /* Write LEN bytes from MYADDR to target raw memory at address
1842 MEMADDR. Returns either 0 for success or an errno value if any
1843 error occurs. If an error occurs, no guarantee is made about how
1844 much data got written. Callers that can deal with partial writes
1845 should call target_write. */
1846
1847 int
1848 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1849 {
1850 /* Dispatch to the topmost target, not the flattened current_target.
1851 Memory accesses check target->to_has_(all_)memory, and the
1852 flattened target doesn't inherit those. */
1853 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1854 myaddr, memaddr, len) == len)
1855 return 0;
1856 else
1857 return EIO;
1858 }
1859
1860 /* Fetch the target's memory map. */
1861
1862 VEC(mem_region_s) *
1863 target_memory_map (void)
1864 {
1865 VEC(mem_region_s) *result;
1866 struct mem_region *last_one, *this_one;
1867 int ix;
1868 struct target_ops *t;
1869
1870 if (targetdebug)
1871 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1872
1873 for (t = current_target.beneath; t != NULL; t = t->beneath)
1874 if (t->to_memory_map != NULL)
1875 break;
1876
1877 if (t == NULL)
1878 return NULL;
1879
1880 result = t->to_memory_map (t);
1881 if (result == NULL)
1882 return NULL;
1883
1884 qsort (VEC_address (mem_region_s, result),
1885 VEC_length (mem_region_s, result),
1886 sizeof (struct mem_region), mem_region_cmp);
1887
1888 /* Check that regions do not overlap. Simultaneously assign
1889 a numbering for the "mem" commands to use to refer to
1890 each region. */
1891 last_one = NULL;
1892 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1893 {
1894 this_one->number = ix;
1895
1896 if (last_one && last_one->hi > this_one->lo)
1897 {
1898 warning (_("Overlapping regions in memory map: ignoring"));
1899 VEC_free (mem_region_s, result);
1900 return NULL;
1901 }
1902 last_one = this_one;
1903 }
1904
1905 return result;
1906 }
1907
1908 void
1909 target_flash_erase (ULONGEST address, LONGEST length)
1910 {
1911 struct target_ops *t;
1912
1913 for (t = current_target.beneath; t != NULL; t = t->beneath)
1914 if (t->to_flash_erase != NULL)
1915 {
1916 if (targetdebug)
1917 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1918 hex_string (address), phex (length, 0));
1919 t->to_flash_erase (t, address, length);
1920 return;
1921 }
1922
1923 tcomplain ();
1924 }
1925
1926 void
1927 target_flash_done (void)
1928 {
1929 struct target_ops *t;
1930
1931 for (t = current_target.beneath; t != NULL; t = t->beneath)
1932 if (t->to_flash_done != NULL)
1933 {
1934 if (targetdebug)
1935 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1936 t->to_flash_done (t);
1937 return;
1938 }
1939
1940 tcomplain ();
1941 }
1942
1943 static void
1944 show_trust_readonly (struct ui_file *file, int from_tty,
1945 struct cmd_list_element *c, const char *value)
1946 {
1947 fprintf_filtered (file,
1948 _("Mode for reading from readonly sections is %s.\n"),
1949 value);
1950 }
1951
1952 /* More generic transfers. */
1953
1954 static LONGEST
1955 default_xfer_partial (struct target_ops *ops, enum target_object object,
1956 const char *annex, gdb_byte *readbuf,
1957 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1958 {
1959 if (object == TARGET_OBJECT_MEMORY
1960 && ops->deprecated_xfer_memory != NULL)
1961 /* If available, fall back to the target's
1962 "deprecated_xfer_memory" method. */
1963 {
1964 int xfered = -1;
1965
1966 errno = 0;
1967 if (writebuf != NULL)
1968 {
1969 void *buffer = xmalloc (len);
1970 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1971
1972 memcpy (buffer, writebuf, len);
1973 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1974 1/*write*/, NULL, ops);
1975 do_cleanups (cleanup);
1976 }
1977 if (readbuf != NULL)
1978 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1979 0/*read*/, NULL, ops);
1980 if (xfered > 0)
1981 return xfered;
1982 else if (xfered == 0 && errno == 0)
1983 /* "deprecated_xfer_memory" uses 0, cross checked against
1984 ERRNO as one indication of an error. */
1985 return 0;
1986 else
1987 return -1;
1988 }
1989 else if (ops->beneath != NULL)
1990 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1991 readbuf, writebuf, offset, len);
1992 else
1993 return -1;
1994 }
1995
1996 /* The xfer_partial handler for the topmost target. Unlike the default,
1997 it does not need to handle memory specially; it just passes all
1998 requests down the stack. */
1999
2000 static LONGEST
2001 current_xfer_partial (struct target_ops *ops, enum target_object object,
2002 const char *annex, gdb_byte *readbuf,
2003 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2004 {
2005 if (ops->beneath != NULL)
2006 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2007 readbuf, writebuf, offset, len);
2008 else
2009 return -1;
2010 }
2011
2012 /* Target vector read/write partial wrapper functions. */
2013
2014 static LONGEST
2015 target_read_partial (struct target_ops *ops,
2016 enum target_object object,
2017 const char *annex, gdb_byte *buf,
2018 ULONGEST offset, LONGEST len)
2019 {
2020 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2021 }
2022
2023 static LONGEST
2024 target_write_partial (struct target_ops *ops,
2025 enum target_object object,
2026 const char *annex, const gdb_byte *buf,
2027 ULONGEST offset, LONGEST len)
2028 {
2029 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2030 }
2031
2032 /* Wrappers to perform the full transfer. */
2033
2034 /* For docs on target_read see target.h. */
2035
2036 LONGEST
2037 target_read (struct target_ops *ops,
2038 enum target_object object,
2039 const char *annex, gdb_byte *buf,
2040 ULONGEST offset, LONGEST len)
2041 {
2042 LONGEST xfered = 0;
2043
2044 while (xfered < len)
2045 {
2046 LONGEST xfer = target_read_partial (ops, object, annex,
2047 (gdb_byte *) buf + xfered,
2048 offset + xfered, len - xfered);
2049
2050 /* Call an observer, notifying them of the xfer progress? */
2051 if (xfer == 0)
2052 return xfered;
2053 if (xfer < 0)
2054 return -1;
2055 xfered += xfer;
2056 QUIT;
2057 }
2058 return len;
2059 }
2060
2061 /* Assuming that the entire [begin, end) range of memory cannot be
2062 read, try to read whatever subrange is possible to read.
2063
2064 The function returns, in RESULT, either zero or one memory block.
2065 If there's a readable subrange at the beginning, it is completely
2066 read and returned. Any further readable subrange will not be read.
2067 Otherwise, if there's a readable subrange at the end, it will be
2068 completely read and returned. Any readable subranges before it
2069 (obviously, not starting at the beginning) will be ignored. In
2070 other cases -- either no readable subrange, or readable subrange(s)
2071 that are neither at the beginning nor at the end -- nothing is returned.
2072
2073 The purpose of this function is to handle a read across a boundary
2074 of accessible memory when a memory map is not available.
2075 The above restrictions are fine for this case, but will give
2076 incorrect results if the memory is 'patchy'. However, supporting
2077 'patchy' memory would require trying to read every single byte,
2078 which seems an unacceptable solution. An explicit memory map is
2079 recommended for that case -- read_memory_robust will then take
2080 care of reading multiple ranges. */
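
/* Worked example (illustrative, with made-up addresses): suppose
   [0x1000, 0x2000) was requested and only [0x1000, 0x1800) is actually
   readable. Reading the whole range fails, but the first byte at
   0x1000 reads fine, so the bisection proceeds "forward": halves that
   read successfully move the boundary up, the failure is narrowed down
   inside [0x1800, 0x2000), and once the remaining unreadable range is
   a single byte the loop stops. The result is one block covering
   [0x1000, 0x1800).  */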
2081
2082 static void
2083 read_whatever_is_readable (struct target_ops *ops,
2084 ULONGEST begin, ULONGEST end,
2085 VEC(memory_read_result_s) **result)
2086 {
2087 gdb_byte *buf = xmalloc (end - begin);
2088 ULONGEST current_begin = begin;
2089 ULONGEST current_end = end;
2090 int forward;
2091 memory_read_result_s r;
2092
2093 /* If we previously failed to read 1 byte, nothing can be done here. */
2094 if (end - begin <= 1)
2095 {
2096 xfree (buf);
2097 return;
2098 }
2099
2100 /* Check that either the first or the last byte is readable, and give up
2101 if not. This heuristic is meant to permit reading accessible memory
2102 at the boundary of an accessible region. */
2103 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2104 buf, begin, 1) == 1)
2105 {
2106 forward = 1;
2107 ++current_begin;
2108 }
2109 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2110 buf + (end-begin) - 1, end - 1, 1) == 1)
2111 {
2112 forward = 0;
2113 --current_end;
2114 }
2115 else
2116 {
2117 xfree (buf);
2118 return;
2119 }
2120
2121 /* The loop invariant is that the [current_begin, current_end) range was
2122 previously found to be not readable as a whole.
2123
2124 Note the loop condition -- if the range has only 1 byte, we can't divide
2125 it any further, so there's no point in trying. */
2126 while (current_end - current_begin > 1)
2127 {
2128 ULONGEST first_half_begin, first_half_end;
2129 ULONGEST second_half_begin, second_half_end;
2130 LONGEST xfer;
2131 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2132
2133 if (forward)
2134 {
2135 first_half_begin = current_begin;
2136 first_half_end = middle;
2137 second_half_begin = middle;
2138 second_half_end = current_end;
2139 }
2140 else
2141 {
2142 first_half_begin = middle;
2143 first_half_end = current_end;
2144 second_half_begin = current_begin;
2145 second_half_end = middle;
2146 }
2147
2148 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2149 buf + (first_half_begin - begin),
2150 first_half_begin,
2151 first_half_end - first_half_begin);
2152
2153 if (xfer == first_half_end - first_half_begin)
2154 {
2155 /* This half reads up fine. So, the error must be in the
2156 other half. */
2157 current_begin = second_half_begin;
2158 current_end = second_half_end;
2159 }
2160 else
2161 {
2162 /* This half is not readable. Because we've tried one byte, we
2163 know some part of this half is actually readable. Go to the next
2164 iteration to divide again and try to read.
2165
2166 We don't handle the other half, because this function only tries
2167 to read a single readable subrange. */
2168 current_begin = first_half_begin;
2169 current_end = first_half_end;
2170 }
2171 }
2172
2173 if (forward)
2174 {
2175 /* The [begin, current_begin) range has been read. */
2176 r.begin = begin;
2177 r.end = current_begin;
2178 r.data = buf;
2179 }
2180 else
2181 {
2182 /* The [current_end, end) range has been read. */
2183 LONGEST rlen = end - current_end;
2184
2185 r.data = xmalloc (rlen);
2186 memcpy (r.data, buf + current_end - begin, rlen);
2187 r.begin = current_end;
2188 r.end = end;
2189 xfree (buf);
2190 }
2191 VEC_safe_push(memory_read_result_s, (*result), &r);
2192 }
2193
2194 void
2195 free_memory_read_result_vector (void *x)
2196 {
2197 VEC(memory_read_result_s) *v = x;
2198 memory_read_result_s *current;
2199 int ix;
2200
2201 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2202 {
2203 xfree (current->data);
2204 }
2205 VEC_free (memory_read_result_s, v);
2206 }
2207
2208 VEC(memory_read_result_s) *
2209 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2210 {
2211 VEC(memory_read_result_s) *result = 0;
2212
2213 LONGEST xfered = 0;
2214 while (xfered < len)
2215 {
2216 struct mem_region *region = lookup_mem_region (offset + xfered);
2217 LONGEST rlen;
2218
2219 /* If there is no explicit region, a fake one should be created. */
2220 gdb_assert (region);
2221
2222 if (region->hi == 0)
2223 rlen = len - xfered;
2224 else
2225 rlen = region->hi - (offset + xfered);
2226
2227 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2228 {
2229 /* Cannot read this region. Note that we can end up here only
2230 if the region is explicitly marked inaccessible, or
2231 'inaccessible-by-default' is in effect. */
2232 xfered += rlen;
2233 }
2234 else
2235 {
2236 LONGEST to_read = min (len - xfered, rlen);
2237 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2238
2239 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2240 (gdb_byte *) buffer,
2241 offset + xfered, to_read);
2242 /* Call an observer, notifying them of the xfer progress? */
2243 if (xfer <= 0)
2244 {
2245 /* Got an error reading full chunk. See if maybe we can read
2246 some subrange. */
2247 xfree (buffer);
2248 read_whatever_is_readable (ops, offset + xfered,
2249 offset + xfered + to_read, &result);
2250 xfered += to_read;
2251 }
2252 else
2253 {
2254 struct memory_read_result r;
2255 r.data = buffer;
2256 r.begin = offset + xfered;
2257 r.end = r.begin + xfer;
2258 VEC_safe_push (memory_read_result_s, result, &r);
2259 xfered += xfer;
2260 }
2261 QUIT;
2262 }
2263 }
2264 return result;
2265 }
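
/* Usage sketch (illustrative only; OFFSET, LEN and the PROCESS
   callback are hypothetical). The caller owns the returned vector and
   the data buffers inside it, and typically releases both with the
   free_memory_read_result_vector helper above.

     VEC(memory_read_result_s) *chunks;
     memory_read_result_s *m;
     struct cleanup *old;
     int i;

     chunks = read_memory_robust (current_target.beneath, offset, len);
     old = make_cleanup (free_memory_read_result_vector, chunks);
     for (i = 0; VEC_iterate (memory_read_result_s, chunks, i, m); i++)
       process (m->begin, m->end, m->data);
     do_cleanups (old);  */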
2266
2267
2268 /* An alternative to target_write with progress callbacks. */
2269
2270 LONGEST
2271 target_write_with_progress (struct target_ops *ops,
2272 enum target_object object,
2273 const char *annex, const gdb_byte *buf,
2274 ULONGEST offset, LONGEST len,
2275 void (*progress) (ULONGEST, void *), void *baton)
2276 {
2277 LONGEST xfered = 0;
2278
2279 /* Give the progress callback a chance to set up. */
2280 if (progress)
2281 (*progress) (0, baton);
2282
2283 while (xfered < len)
2284 {
2285 LONGEST xfer = target_write_partial (ops, object, annex,
2286 (gdb_byte *) buf + xfered,
2287 offset + xfered, len - xfered);
2288
2289 if (xfer == 0)
2290 return xfered;
2291 if (xfer < 0)
2292 return -1;
2293
2294 if (progress)
2295 (*progress) (xfer, baton);
2296
2297 xfered += xfer;
2298 QUIT;
2299 }
2300 return len;
2301 }
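
/* Illustrative sketch of a progress callback (the names are
   hypothetical). The first call passes 0 so the callback can set
   itself up; each later call passes the size of the chunk just
   written.

     static void
     show_progress (ULONGEST written, void *baton)
     {
       ULONGEST *total = baton;

       *total += written;
       printf_unfiltered ("wrote %s bytes so far\n", pulongest (*total));
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 addr, len, show_progress, &total);  */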
2302
2303 /* For docs on target_write see target.h. */
2304
2305 LONGEST
2306 target_write (struct target_ops *ops,
2307 enum target_object object,
2308 const char *annex, const gdb_byte *buf,
2309 ULONGEST offset, LONGEST len)
2310 {
2311 return target_write_with_progress (ops, object, annex, buf, offset, len,
2312 NULL, NULL);
2313 }
2314
2315 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2316 the size of the transferred data. PADDING additional bytes are
2317 available in *BUF_P. This is a helper function for
2318 target_read_alloc; see the declaration of that function for more
2319 information. */
2320
2321 static LONGEST
2322 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2323 const char *annex, gdb_byte **buf_p, int padding)
2324 {
2325 size_t buf_alloc, buf_pos;
2326 gdb_byte *buf;
2327 LONGEST n;
2328
2329 /* This function does not have a length parameter; it reads the
2330 entire OBJECT. Also, it doesn't support objects fetched partly
2331 from one target and partly from another (in a different stratum,
2332 e.g. a core file and an executable). Both reasons make it
2333 unsuitable for reading memory. */
2334 gdb_assert (object != TARGET_OBJECT_MEMORY);
2335
2336 /* Start by reading up to 4K at a time. The target will throttle
2337 this number down if necessary. */
2338 buf_alloc = 4096;
2339 buf = xmalloc (buf_alloc);
2340 buf_pos = 0;
2341 while (1)
2342 {
2343 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2344 buf_pos, buf_alloc - buf_pos - padding);
2345 if (n < 0)
2346 {
2347 /* An error occurred. */
2348 xfree (buf);
2349 return -1;
2350 }
2351 else if (n == 0)
2352 {
2353 /* Read all there was. */
2354 if (buf_pos == 0)
2355 xfree (buf);
2356 else
2357 *buf_p = buf;
2358 return buf_pos;
2359 }
2360
2361 buf_pos += n;
2362
2363 /* If the buffer is filling up, expand it. */
2364 if (buf_alloc < buf_pos * 2)
2365 {
2366 buf_alloc *= 2;
2367 buf = xrealloc (buf, buf_alloc);
2368 }
2369
2370 QUIT;
2371 }
2372 }
2373
2374 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2375 the size of the transferred data. See the declaration in "target.h"
2376 for more information about the return value. */
2377
2378 LONGEST
2379 target_read_alloc (struct target_ops *ops, enum target_object object,
2380 const char *annex, gdb_byte **buf_p)
2381 {
2382 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2383 }
2384
2385 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2386 returned as a string, allocated using xmalloc. If an error occurs
2387 or the transfer is unsupported, NULL is returned. Empty objects
2388 are returned as allocated but empty strings. A warning is issued
2389 if the result contains any embedded NUL bytes. */
2390
2391 char *
2392 target_read_stralloc (struct target_ops *ops, enum target_object object,
2393 const char *annex)
2394 {
2395 gdb_byte *buffer;
2396 char *bufstr;
2397 LONGEST i, transferred;
2398
2399 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2400 bufstr = (char *) buffer;
2401
2402 if (transferred < 0)
2403 return NULL;
2404
2405 if (transferred == 0)
2406 return xstrdup ("");
2407
2408 bufstr[transferred] = 0;
2409
2410 /* Check for embedded NUL bytes; but allow trailing NULs. */
2411 for (i = strlen (bufstr); i < transferred; i++)
2412 if (bufstr[i] != 0)
2413 {
2414 warning (_("target object %d, annex %s, "
2415 "contained unexpected null characters"),
2416 (int) object, annex ? annex : "(none)");
2417 break;
2418 }
2419
2420 return bufstr;
2421 }
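
/* Usage sketch (illustrative; the object/annex pair is one possible
   example). The caller owns the returned string and must xfree it,
   and must be prepared for a NULL result when the transfer is
   unsupported or fails.

     char *text = target_read_stralloc (&current_target,
                                        TARGET_OBJECT_OSDATA, "processes");
     if (text != NULL)
       {
         ... consume TEXT ...
         xfree (text);
       }  */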
2422
2423 /* Memory transfer methods. */
2424
2425 void
2426 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2427 LONGEST len)
2428 {
2429 /* This method is used to read from an alternate, non-current
2430 target. This read must bypass the overlay support (as symbols
2431 don't match this target), and GDB's internal cache (wrong cache
2432 for this target). */
2433 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2434 != len)
2435 memory_error (EIO, addr);
2436 }
2437
2438 ULONGEST
2439 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2440 int len, enum bfd_endian byte_order)
2441 {
2442 gdb_byte buf[sizeof (ULONGEST)];
2443
2444 gdb_assert (len <= sizeof (buf));
2445 get_target_memory (ops, addr, buf, len);
2446 return extract_unsigned_integer (buf, len, byte_order);
2447 }
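
/* Illustrative use (ADDR is hypothetical): fetch a 4-byte unsigned
   value from the raw memory of an alternate target, bypassing overlays
   and GDB's cache as described above.

     ULONGEST word
       = get_target_memory_unsigned (ops, addr, 4,
                                     gdbarch_byte_order (target_gdbarch ()));  */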
2448
2449 int
2450 target_insert_breakpoint (struct gdbarch *gdbarch,
2451 struct bp_target_info *bp_tgt)
2452 {
2453 if (!may_insert_breakpoints)
2454 {
2455 warning (_("May not insert breakpoints"));
2456 return 1;
2457 }
2458
2459 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2460 }
2461
2462 int
2463 target_remove_breakpoint (struct gdbarch *gdbarch,
2464 struct bp_target_info *bp_tgt)
2465 {
2466 /* This is kind of a weird case to handle, but the permission might
2467 have been changed after breakpoints were inserted - in which case
2468 we should just take the user literally and assume that any
2469 breakpoints should be left in place. */
2470 if (!may_insert_breakpoints)
2471 {
2472 warning (_("May not remove breakpoints"));
2473 return 1;
2474 }
2475
2476 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2477 }
2478
2479 static void
2480 target_info (char *args, int from_tty)
2481 {
2482 struct target_ops *t;
2483 int has_all_mem = 0;
2484
2485 if (symfile_objfile != NULL)
2486 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2487
2488 for (t = target_stack; t != NULL; t = t->beneath)
2489 {
2490 if (!(*t->to_has_memory) (t))
2491 continue;
2492
2493 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2494 continue;
2495 if (has_all_mem)
2496 printf_unfiltered (_("\tWhile running this, "
2497 "GDB does not access memory from...\n"));
2498 printf_unfiltered ("%s:\n", t->to_longname);
2499 (t->to_files_info) (t);
2500 has_all_mem = (*t->to_has_all_memory) (t);
2501 }
2502 }
2503
2504 /* This function is called before any new inferior is created, e.g.
2505 by running a program, attaching, or connecting to a target.
2506 It cleans up any state from previous invocations which might
2507 change between runs. This is a subset of what target_preopen
2508 resets (things which might change between targets). */
2509
2510 void
2511 target_pre_inferior (int from_tty)
2512 {
2513 /* Clear out solib state. Otherwise the solib state of the previous
2514 inferior might have survived and is entirely wrong for the new
2515 target. This has been observed on GNU/Linux using glibc 2.3. How
2516 to reproduce:
2517
2518 bash$ ./foo&
2519 [1] 4711
2520 bash$ ./foo&
2521 [1] 4712
2522 bash$ gdb ./foo
2523 [...]
2524 (gdb) attach 4711
2525 (gdb) detach
2526 (gdb) attach 4712
2527 Cannot access memory at address 0xdeadbeef
2528 */
2529
2530 /* In some OSs, the shared library list is the same/global/shared
2531 across inferiors. If code is shared between processes, so are
2532 memory regions and features. */
2533 if (!gdbarch_has_global_solist (target_gdbarch ()))
2534 {
2535 no_shared_libraries (NULL, from_tty);
2536
2537 invalidate_target_mem_regions ();
2538
2539 target_clear_description ();
2540 }
2541
2542 agent_capability_invalidate ();
2543 }
2544
2545 /* Callback for iterate_over_inferiors. Gets rid of the given
2546 inferior. */
2547
2548 static int
2549 dispose_inferior (struct inferior *inf, void *args)
2550 {
2551 struct thread_info *thread;
2552
2553 thread = any_thread_of_process (inf->pid);
2554 if (thread)
2555 {
2556 switch_to_thread (thread->ptid);
2557
2558 /* Core inferiors actually should be detached, not killed. */
2559 if (target_has_execution)
2560 target_kill ();
2561 else
2562 target_detach (NULL, 0);
2563 }
2564
2565 return 0;
2566 }
2567
2568 /* This is to be called by the open routine before it does
2569 anything. */
2570
2571 void
2572 target_preopen (int from_tty)
2573 {
2574 dont_repeat ();
2575
2576 if (have_inferiors ())
2577 {
2578 if (!from_tty
2579 || !have_live_inferiors ()
2580 || query (_("A program is being debugged already. Kill it? ")))
2581 iterate_over_inferiors (dispose_inferior, NULL);
2582 else
2583 error (_("Program not killed."));
2584 }
2585
2586 /* Calling target_kill may remove the target from the stack. But if
2587 it doesn't (which seems like a win for UDI), remove it now. */
2588 /* Leave the exec target, though. The user may be switching from a
2589 live process to a core of the same program. */
2590 pop_all_targets_above (file_stratum);
2591
2592 target_pre_inferior (from_tty);
2593 }
2594
2595 /* Detach a target after doing deferred register stores. */
2596
2597 void
2598 target_detach (char *args, int from_tty)
2599 {
2600 struct target_ops* t;
2601
2602 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2603 /* Don't remove global breakpoints here. They're removed on
2604 disconnection from the target. */
2605 ;
2606 else
2607 /* If we're in breakpoints-always-inserted mode, have to remove
2608 them before detaching. */
2609 remove_breakpoints_pid (PIDGET (inferior_ptid));
2610
2611 prepare_for_detach ();
2612
2613 for (t = current_target.beneath; t != NULL; t = t->beneath)
2614 {
2615 if (t->to_detach != NULL)
2616 {
2617 t->to_detach (t, args, from_tty);
2618 if (targetdebug)
2619 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2620 args, from_tty);
2621 return;
2622 }
2623 }
2624
2625 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2626 }
2627
2628 void
2629 target_disconnect (char *args, int from_tty)
2630 {
2631 struct target_ops *t;
2632
2633 /* If we're in breakpoints-always-inserted mode or if breakpoints
2634 are global across processes, we have to remove them before
2635 disconnecting. */
2636 remove_breakpoints ();
2637
2638 for (t = current_target.beneath; t != NULL; t = t->beneath)
2639 if (t->to_disconnect != NULL)
2640 {
2641 if (targetdebug)
2642 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2643 args, from_tty);
2644 t->to_disconnect (t, args, from_tty);
2645 return;
2646 }
2647
2648 tcomplain ();
2649 }
2650
2651 ptid_t
2652 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2653 {
2654 struct target_ops *t;
2655
2656 for (t = current_target.beneath; t != NULL; t = t->beneath)
2657 {
2658 if (t->to_wait != NULL)
2659 {
2660 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2661
2662 if (targetdebug)
2663 {
2664 char *status_string;
2665 char *options_string;
2666
2667 status_string = target_waitstatus_to_string (status);
2668 options_string = target_options_to_string (options);
2669 fprintf_unfiltered (gdb_stdlog,
2670 "target_wait (%d, status, options={%s})"
2671 " = %d, %s\n",
2672 PIDGET (ptid), options_string,
2673 PIDGET (retval), status_string);
2674 xfree (status_string);
2675 xfree (options_string);
2676 }
2677
2678 return retval;
2679 }
2680 }
2681
2682 noprocess ();
2683 }
2684
2685 char *
2686 target_pid_to_str (ptid_t ptid)
2687 {
2688 struct target_ops *t;
2689
2690 for (t = current_target.beneath; t != NULL; t = t->beneath)
2691 {
2692 if (t->to_pid_to_str != NULL)
2693 return (*t->to_pid_to_str) (t, ptid);
2694 }
2695
2696 return normal_pid_to_str (ptid);
2697 }
2698
2699 char *
2700 target_thread_name (struct thread_info *info)
2701 {
2702 struct target_ops *t;
2703
2704 for (t = current_target.beneath; t != NULL; t = t->beneath)
2705 {
2706 if (t->to_thread_name != NULL)
2707 return (*t->to_thread_name) (info);
2708 }
2709
2710 return NULL;
2711 }
2712
2713 void
2714 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2715 {
2716 struct target_ops *t;
2717
2718 target_dcache_invalidate ();
2719
2720 for (t = current_target.beneath; t != NULL; t = t->beneath)
2721 {
2722 if (t->to_resume != NULL)
2723 {
2724 t->to_resume (t, ptid, step, signal);
2725 if (targetdebug)
2726 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2727 PIDGET (ptid),
2728 step ? "step" : "continue",
2729 gdb_signal_to_name (signal));
2730
2731 registers_changed_ptid (ptid);
2732 set_executing (ptid, 1);
2733 set_running (ptid, 1);
2734 clear_inline_frame_state (ptid);
2735 return;
2736 }
2737 }
2738
2739 noprocess ();
2740 }
2741
2742 void
2743 target_pass_signals (int numsigs, unsigned char *pass_signals)
2744 {
2745 struct target_ops *t;
2746
2747 for (t = current_target.beneath; t != NULL; t = t->beneath)
2748 {
2749 if (t->to_pass_signals != NULL)
2750 {
2751 if (targetdebug)
2752 {
2753 int i;
2754
2755 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2756 numsigs);
2757
2758 for (i = 0; i < numsigs; i++)
2759 if (pass_signals[i])
2760 fprintf_unfiltered (gdb_stdlog, " %s",
2761 gdb_signal_to_name (i));
2762
2763 fprintf_unfiltered (gdb_stdlog, " })\n");
2764 }
2765
2766 (*t->to_pass_signals) (numsigs, pass_signals);
2767 return;
2768 }
2769 }
2770 }
2771
2772 void
2773 target_program_signals (int numsigs, unsigned char *program_signals)
2774 {
2775 struct target_ops *t;
2776
2777 for (t = current_target.beneath; t != NULL; t = t->beneath)
2778 {
2779 if (t->to_program_signals != NULL)
2780 {
2781 if (targetdebug)
2782 {
2783 int i;
2784
2785 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2786 numsigs);
2787
2788 for (i = 0; i < numsigs; i++)
2789 if (program_signals[i])
2790 fprintf_unfiltered (gdb_stdlog, " %s",
2791 gdb_signal_to_name (i));
2792
2793 fprintf_unfiltered (gdb_stdlog, " })\n");
2794 }
2795
2796 (*t->to_program_signals) (numsigs, program_signals);
2797 return;
2798 }
2799 }
2800 }
2801
2802 /* Look through the list of possible targets for a target that can
2803 follow forks. */
2804
2805 int
2806 target_follow_fork (int follow_child)
2807 {
2808 struct target_ops *t;
2809
2810 for (t = current_target.beneath; t != NULL; t = t->beneath)
2811 {
2812 if (t->to_follow_fork != NULL)
2813 {
2814 int retval = t->to_follow_fork (t, follow_child);
2815
2816 if (targetdebug)
2817 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2818 follow_child, retval);
2819 return retval;
2820 }
2821 }
2822
2823 /* Some target returned a fork event, but did not know how to follow it. */
2824 internal_error (__FILE__, __LINE__,
2825 _("could not find a target to follow fork"));
2826 }
2827
2828 void
2829 target_mourn_inferior (void)
2830 {
2831 struct target_ops *t;
2832
2833 for (t = current_target.beneath; t != NULL; t = t->beneath)
2834 {
2835 if (t->to_mourn_inferior != NULL)
2836 {
2837 t->to_mourn_inferior (t);
2838 if (targetdebug)
2839 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2840
2841 /* We no longer need to keep handles on any of the object files.
2842 Make sure to release them to avoid unnecessarily locking any
2843 of them while we're not actually debugging. */
2844 bfd_cache_close_all ();
2845
2846 return;
2847 }
2848 }
2849
2850 internal_error (__FILE__, __LINE__,
2851 _("could not find a target to follow mourn inferior"));
2852 }
2853
2854 /* Look for a target which can describe architectural features, starting
2855 from TARGET. If we find one, return its description. */
2856
2857 const struct target_desc *
2858 target_read_description (struct target_ops *target)
2859 {
2860 struct target_ops *t;
2861
2862 for (t = target; t != NULL; t = t->beneath)
2863 if (t->to_read_description != NULL)
2864 {
2865 const struct target_desc *tdesc;
2866
2867 tdesc = t->to_read_description (t);
2868 if (tdesc)
2869 return tdesc;
2870 }
2871
2872 return NULL;
2873 }
2874
2875 /* The default implementation of to_search_memory.
2876 This implements a basic search of memory, reading target memory and
2877 performing the search here (as opposed to performing the search on the
2878 target side with, for example, gdbserver). */
2879
2880 int
2881 simple_search_memory (struct target_ops *ops,
2882 CORE_ADDR start_addr, ULONGEST search_space_len,
2883 const gdb_byte *pattern, ULONGEST pattern_len,
2884 CORE_ADDR *found_addrp)
2885 {
2886 /* NOTE: also defined in find.c testcase. */
2887 #define SEARCH_CHUNK_SIZE 16000
2888 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2889 /* Buffer to hold memory contents for searching. */
2890 gdb_byte *search_buf;
2891 unsigned search_buf_size;
2892 struct cleanup *old_cleanups;
2893
2894 search_buf_size = chunk_size + pattern_len - 1;
2895
2896 /* No point in trying to allocate a buffer larger than the search space. */
2897 if (search_space_len < search_buf_size)
2898 search_buf_size = search_space_len;
2899
2900 search_buf = malloc (search_buf_size);
2901 if (search_buf == NULL)
2902 error (_("Unable to allocate memory to perform the search."));
2903 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2904
2905 /* Prime the search buffer. */
2906
2907 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2908 search_buf, start_addr, search_buf_size) != search_buf_size)
2909 {
2910 warning (_("Unable to access %s bytes of target "
2911 "memory at %s, halting search."),
2912 pulongest (search_buf_size), hex_string (start_addr));
2913 do_cleanups (old_cleanups);
2914 return -1;
2915 }
2916
2917 /* Perform the search.
2918
2919 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2920 When we've scanned N bytes we copy the trailing bytes to the start and
2921 read in another N bytes. */
2922
2923 while (search_space_len >= pattern_len)
2924 {
2925 gdb_byte *found_ptr;
2926 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2927
2928 found_ptr = memmem (search_buf, nr_search_bytes,
2929 pattern, pattern_len);
2930
2931 if (found_ptr != NULL)
2932 {
2933 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2934
2935 *found_addrp = found_addr;
2936 do_cleanups (old_cleanups);
2937 return 1;
2938 }
2939
2940 /* Not found in this chunk, skip to next chunk. */
2941
2942 /* Don't let search_space_len wrap here, it's unsigned. */
2943 if (search_space_len >= chunk_size)
2944 search_space_len -= chunk_size;
2945 else
2946 search_space_len = 0;
2947
2948 if (search_space_len >= pattern_len)
2949 {
2950 unsigned keep_len = search_buf_size - chunk_size;
2951 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2952 int nr_to_read;
2953
2954 /* Copy the trailing part of the previous iteration to the front
2955 of the buffer for the next iteration. */
2956 gdb_assert (keep_len == pattern_len - 1);
2957 memcpy (search_buf, search_buf + chunk_size, keep_len);
2958
2959 nr_to_read = min (search_space_len - keep_len, chunk_size);
2960
2961 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2962 search_buf + keep_len, read_addr,
2963 nr_to_read) != nr_to_read)
2964 {
2965 warning (_("Unable to access %s bytes of target "
2966 "memory at %s, halting search."),
2967 plongest (nr_to_read),
2968 hex_string (read_addr));
2969 do_cleanups (old_cleanups);
2970 return -1;
2971 }
2972
2973 start_addr += chunk_size;
2974 }
2975 }
2976
2977 /* Not found. */
2978
2979 do_cleanups (old_cleanups);
2980 return 0;
2981 }
2982
2983 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2984 sequence of bytes in PATTERN with length PATTERN_LEN.
2985
2986 The result is 1 if found, 0 if not found, and -1 if there was an error
2987 requiring halting of the search (e.g. memory read error).
2988 If the pattern is found the address is recorded in FOUND_ADDRP. */
2989
2990 int
2991 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2992 const gdb_byte *pattern, ULONGEST pattern_len,
2993 CORE_ADDR *found_addrp)
2994 {
2995 struct target_ops *t;
2996 int found;
2997
2998 /* We don't use INHERIT to set current_target.to_search_memory,
2999 so we have to scan the target stack and handle targetdebug
3000 ourselves. */
3001
3002 if (targetdebug)
3003 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3004 hex_string (start_addr));
3005
3006 for (t = current_target.beneath; t != NULL; t = t->beneath)
3007 if (t->to_search_memory != NULL)
3008 break;
3009
3010 if (t != NULL)
3011 {
3012 found = t->to_search_memory (t, start_addr, search_space_len,
3013 pattern, pattern_len, found_addrp);
3014 }
3015 else
3016 {
3017 /* If a special version of to_search_memory isn't available, use the
3018 simple version. */
3019 found = simple_search_memory (current_target.beneath,
3020 start_addr, search_space_len,
3021 pattern, pattern_len, found_addrp);
3022 }
3023
3024 if (targetdebug)
3025 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3026
3027 return found;
3028 }
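
/* Usage sketch (illustrative; PATTERN, START and REGION_LEN are
   hypothetical). A result of 1 means the pattern was found and its
   address stored in FOUND; 0 means not found; -1 means the search had
   to halt, e.g. because of a memory read error.

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;
     int res;

     res = target_search_memory (start, region_len,
                                 pattern, sizeof pattern, &found);
     if (res == 1)
       printf_unfiltered ("pattern found at %s\n", hex_string (found));
     else if (res == 0)
       printf_unfiltered ("pattern not found\n");  */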
3029
3030 /* Look through the currently pushed targets. If none of them will
3031 be able to restart the currently running process, issue an error
3032 message. */
3033
3034 void
3035 target_require_runnable (void)
3036 {
3037 struct target_ops *t;
3038
3039 for (t = target_stack; t != NULL; t = t->beneath)
3040 {
3041 /* If this target knows how to create a new program, then
3042 assume we will still be able to after killing the current
3043 one. Either killing and mourning will not pop T, or else
3044 find_default_run_target will find it again. */
3045 if (t->to_create_inferior != NULL)
3046 return;
3047
3048 /* Do not worry about thread_stratum targets that can not
3049 create inferiors. Assume they will be pushed again if
3050 necessary, and continue to the process_stratum. */
3051 if (t->to_stratum == thread_stratum
3052 || t->to_stratum == arch_stratum)
3053 continue;
3054
3055 error (_("The \"%s\" target does not support \"run\". "
3056 "Try \"help target\" or \"continue\"."),
3057 t->to_shortname);
3058 }
3059
3060 /* This function is only called if the target is running. In that
3061 case there should have been a process_stratum target and it
3062 should either know how to create inferiors, or not... */
3063 internal_error (__FILE__, __LINE__, _("No targets found"));
3064 }
3065
3066 /* Look through the list of possible targets for a target that can
3067 execute a run or attach command without any other data. This is
3068 used to locate the default process stratum.
3069
3070 If DO_MESG is not NULL, the result is always valid (error() is
3071 called for errors); else, return NULL on error. */
3072
3073 static struct target_ops *
3074 find_default_run_target (char *do_mesg)
3075 {
3076 struct target_ops **t;
3077 struct target_ops *runable = NULL;
3078 int count;
3079
3080 count = 0;
3081
3082 for (t = target_structs; t < target_structs + target_struct_size;
3083 ++t)
3084 {
3085 if ((*t)->to_can_run && target_can_run (*t))
3086 {
3087 runable = *t;
3088 ++count;
3089 }
3090 }
3091
3092 if (count != 1)
3093 {
3094 if (do_mesg)
3095 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3096 else
3097 return NULL;
3098 }
3099
3100 return runable;
3101 }
3102
3103 void
3104 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3105 {
3106 struct target_ops *t;
3107
3108 t = find_default_run_target ("attach");
3109 (t->to_attach) (t, args, from_tty);
3110 return;
3111 }
3112
3113 void
3114 find_default_create_inferior (struct target_ops *ops,
3115 char *exec_file, char *allargs, char **env,
3116 int from_tty)
3117 {
3118 struct target_ops *t;
3119
3120 t = find_default_run_target ("run");
3121 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3122 return;
3123 }
3124
3125 static int
3126 find_default_can_async_p (void)
3127 {
3128 struct target_ops *t;
3129
3130 /* This may be called before the target is pushed on the stack;
3131 look for the default process stratum. If there's none, gdb isn't
3132 configured with a native debugger, and target remote isn't
3133 connected yet. */
3134 t = find_default_run_target (NULL);
3135 if (t && t->to_can_async_p)
3136 return (t->to_can_async_p) ();
3137 return 0;
3138 }
3139
3140 static int
3141 find_default_is_async_p (void)
3142 {
3143 struct target_ops *t;
3144
3145 /* This may be called before the target is pushed on the stack;
3146 look for the default process stratum. If there's none, gdb isn't
3147 configured with a native debugger, and target remote isn't
3148 connected yet. */
3149 t = find_default_run_target (NULL);
3150 if (t && t->to_is_async_p)
3151 return (t->to_is_async_p) ();
3152 return 0;
3153 }
3154
3155 static int
3156 find_default_supports_non_stop (void)
3157 {
3158 struct target_ops *t;
3159
3160 t = find_default_run_target (NULL);
3161 if (t && t->to_supports_non_stop)
3162 return (t->to_supports_non_stop) ();
3163 return 0;
3164 }
3165
3166 int
3167 target_supports_non_stop (void)
3168 {
3169 struct target_ops *t;
3170
3171 for (t = &current_target; t != NULL; t = t->beneath)
3172 if (t->to_supports_non_stop)
3173 return t->to_supports_non_stop ();
3174
3175 return 0;
3176 }
3177
3178 /* Implement the "info proc" command. */
3179
3180 int
3181 target_info_proc (char *args, enum info_proc_what what)
3182 {
3183 struct target_ops *t;
3184
3185 /* If we're already connected to something that can get us OS
3186 related data, use it. Otherwise, try using the native
3187 target. */
3188 if (current_target.to_stratum >= process_stratum)
3189 t = current_target.beneath;
3190 else
3191 t = find_default_run_target (NULL);
3192
3193 for (; t != NULL; t = t->beneath)
3194 {
3195 if (t->to_info_proc != NULL)
3196 {
3197 t->to_info_proc (t, args, what);
3198
3199 if (targetdebug)
3200 fprintf_unfiltered (gdb_stdlog,
3201 "target_info_proc (\"%s\", %d)\n", args, what);
3202
3203 return 1;
3204 }
3205 }
3206
3207 return 0;
3208 }
3209
3210 static int
3211 find_default_supports_disable_randomization (void)
3212 {
3213 struct target_ops *t;
3214
3215 t = find_default_run_target (NULL);
3216 if (t && t->to_supports_disable_randomization)
3217 return (t->to_supports_disable_randomization) ();
3218 return 0;
3219 }
3220
3221 int
3222 target_supports_disable_randomization (void)
3223 {
3224 struct target_ops *t;
3225
3226 for (t = &current_target; t != NULL; t = t->beneath)
3227 if (t->to_supports_disable_randomization)
3228 return t->to_supports_disable_randomization ();
3229
3230 return 0;
3231 }
3232
3233 char *
3234 target_get_osdata (const char *type)
3235 {
3236 struct target_ops *t;
3237
3238 /* If we're already connected to something that can get us OS
3239 related data, use it. Otherwise, try using the native
3240 target. */
3241 if (current_target.to_stratum >= process_stratum)
3242 t = current_target.beneath;
3243 else
3244 t = find_default_run_target ("get OS data");
3245
3246 if (!t)
3247 return NULL;
3248
3249 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3250 }
3251
3252 /* Determine the current address space of thread PTID. */
3253
3254 struct address_space *
3255 target_thread_address_space (ptid_t ptid)
3256 {
3257 struct address_space *aspace;
3258 struct inferior *inf;
3259 struct target_ops *t;
3260
3261 for (t = current_target.beneath; t != NULL; t = t->beneath)
3262 {
3263 if (t->to_thread_address_space != NULL)
3264 {
3265 aspace = t->to_thread_address_space (t, ptid);
3266 gdb_assert (aspace);
3267
3268 if (targetdebug)
3269 fprintf_unfiltered (gdb_stdlog,
3270 "target_thread_address_space (%s) = %d\n",
3271 target_pid_to_str (ptid),
3272 address_space_num (aspace));
3273 return aspace;
3274 }
3275 }
3276
3277 /* Fall-back to the "main" address space of the inferior. */
3278 inf = find_inferior_pid (ptid_get_pid (ptid));
3279
3280 if (inf == NULL || inf->aspace == NULL)
3281 internal_error (__FILE__, __LINE__,
3282 _("Can't determine the current "
3283 "address space of thread %s\n"),
3284 target_pid_to_str (ptid));
3285
3286 return inf->aspace;
3287 }
3288
3289
3290 /* Target file operations. */
3291
3292 static struct target_ops *
3293 default_fileio_target (void)
3294 {
3295 /* If we're already connected to something that can perform
3296 file I/O, use it. Otherwise, try using the native target. */
3297 if (current_target.to_stratum >= process_stratum)
3298 return current_target.beneath;
3299 else
3300 return find_default_run_target ("file I/O");
3301 }
3302
3303 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3304 target file descriptor, or -1 if an error occurs (and set
3305 *TARGET_ERRNO). */
3306 int
3307 target_fileio_open (const char *filename, int flags, int mode,
3308 int *target_errno)
3309 {
3310 struct target_ops *t;
3311
3312 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3313 {
3314 if (t->to_fileio_open != NULL)
3315 {
3316 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3317
3318 if (targetdebug)
3319 fprintf_unfiltered (gdb_stdlog,
3320 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3321 filename, flags, mode,
3322 fd, fd != -1 ? 0 : *target_errno);
3323 return fd;
3324 }
3325 }
3326
3327 *target_errno = FILEIO_ENOSYS;
3328 return -1;
3329 }
3330
3331 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3332 Return the number of bytes written, or -1 if an error occurs
3333 (and set *TARGET_ERRNO). */
3334 int
3335 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3336 ULONGEST offset, int *target_errno)
3337 {
3338 struct target_ops *t;
3339
3340 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3341 {
3342 if (t->to_fileio_pwrite != NULL)
3343 {
3344 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3345 target_errno);
3346
3347 if (targetdebug)
3348 fprintf_unfiltered (gdb_stdlog,
3349 "target_fileio_pwrite (%d,...,%d,%s) "
3350 "= %d (%d)\n",
3351 fd, len, pulongest (offset),
3352 ret, ret != -1 ? 0 : *target_errno);
3353 return ret;
3354 }
3355 }
3356
3357 *target_errno = FILEIO_ENOSYS;
3358 return -1;
3359 }
3360
3361 /* Read up to LEN bytes from FD on the target into READ_BUF.
3362 Return the number of bytes read, or -1 if an error occurs
3363 (and set *TARGET_ERRNO). */
3364 int
3365 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3366 ULONGEST offset, int *target_errno)
3367 {
3368 struct target_ops *t;
3369
3370 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3371 {
3372 if (t->to_fileio_pread != NULL)
3373 {
3374 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3375 target_errno);
3376
3377 if (targetdebug)
3378 fprintf_unfiltered (gdb_stdlog,
3379 "target_fileio_pread (%d,...,%d,%s) "
3380 "= %d (%d)\n",
3381 fd, len, pulongest (offset),
3382 ret, ret != -1 ? 0 : *target_errno);
3383 return ret;
3384 }
3385 }
3386
3387 *target_errno = FILEIO_ENOSYS;
3388 return -1;
3389 }
3390
3391 /* Close FD on the target. Return 0, or -1 if an error occurs
3392 (and set *TARGET_ERRNO). */
3393 int
3394 target_fileio_close (int fd, int *target_errno)
3395 {
3396 struct target_ops *t;
3397
3398 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3399 {
3400 if (t->to_fileio_close != NULL)
3401 {
3402 int ret = t->to_fileio_close (fd, target_errno);
3403
3404 if (targetdebug)
3405 fprintf_unfiltered (gdb_stdlog,
3406 "target_fileio_close (%d) = %d (%d)\n",
3407 fd, ret, ret != -1 ? 0 : *target_errno);
3408 return ret;
3409 }
3410 }
3411
3412 *target_errno = FILEIO_ENOSYS;
3413 return -1;
3414 }
3415
3416 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3417 occurs (and set *TARGET_ERRNO). */
3418 int
3419 target_fileio_unlink (const char *filename, int *target_errno)
3420 {
3421 struct target_ops *t;
3422
3423 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3424 {
3425 if (t->to_fileio_unlink != NULL)
3426 {
3427 int ret = t->to_fileio_unlink (filename, target_errno);
3428
3429 if (targetdebug)
3430 fprintf_unfiltered (gdb_stdlog,
3431 "target_fileio_unlink (%s) = %d (%d)\n",
3432 filename, ret, ret != -1 ? 0 : *target_errno);
3433 return ret;
3434 }
3435 }
3436
3437 *target_errno = FILEIO_ENOSYS;
3438 return -1;
3439 }
3440
3441 /* Read value of symbolic link FILENAME on the target. Return a
3442 null-terminated string allocated via xmalloc, or NULL if an error
3443 occurs (and set *TARGET_ERRNO). */
3444 char *
3445 target_fileio_readlink (const char *filename, int *target_errno)
3446 {
3447 struct target_ops *t;
3448
3449 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3450 {
3451 if (t->to_fileio_readlink != NULL)
3452 {
3453 char *ret = t->to_fileio_readlink (filename, target_errno);
3454
3455 if (targetdebug)
3456 fprintf_unfiltered (gdb_stdlog,
3457 "target_fileio_readlink (%s) = %s (%d)\n",
3458 filename, ret? ret : "(nil)",
3459 ret? 0 : *target_errno);
3460 return ret;
3461 }
3462 }
3463
3464 *target_errno = FILEIO_ENOSYS;
3465 return NULL;
3466 }
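
/* Usage sketch for the fileio wrappers above (illustrative; the file
   name is hypothetical). Each call reports failure as -1 (or NULL)
   and leaves a FILEIO_* code in *TARGET_ERRNO.

     int target_errno, fd, n;
     gdb_byte data[512];

     fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY, 0,
                              &target_errno);
     if (fd == -1)
       return;   (consult TARGET_ERRNO for the reason)
     n = target_fileio_pread (fd, data, sizeof data, 0, &target_errno);
     target_fileio_close (fd, &target_errno);  */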
3467
3468 static void
3469 target_fileio_close_cleanup (void *opaque)
3470 {
3471 int fd = *(int *) opaque;
3472 int target_errno;
3473
3474 target_fileio_close (fd, &target_errno);
3475 }
3476
3477 /* Read target file FILENAME. Store the result in *BUF_P and
3478 return the size of the transferred data. PADDING additional bytes are
3479 available in *BUF_P. This is a helper function for
3480 target_fileio_read_alloc; see the declaration of that function for more
3481 information. */
3482
3483 static LONGEST
3484 target_fileio_read_alloc_1 (const char *filename,
3485 gdb_byte **buf_p, int padding)
3486 {
3487 struct cleanup *close_cleanup;
3488 size_t buf_alloc, buf_pos;
3489 gdb_byte *buf;
3490 LONGEST n;
3491 int fd;
3492 int target_errno;
3493
3494 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3495 if (fd == -1)
3496 return -1;
3497
3498 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3499
3500 /* Start by reading up to 4K at a time. The target will throttle
3501 this number down if necessary. */
3502 buf_alloc = 4096;
3503 buf = xmalloc (buf_alloc);
3504 buf_pos = 0;
3505 while (1)
3506 {
3507 n = target_fileio_pread (fd, &buf[buf_pos],
3508 buf_alloc - buf_pos - padding, buf_pos,
3509 &target_errno);
3510 if (n < 0)
3511 {
3512 /* An error occurred. */
3513 do_cleanups (close_cleanup);
3514 xfree (buf);
3515 return -1;
3516 }
3517 else if (n == 0)
3518 {
3519 /* Read all there was. */
3520 do_cleanups (close_cleanup);
3521 if (buf_pos == 0)
3522 xfree (buf);
3523 else
3524 *buf_p = buf;
3525 return buf_pos;
3526 }
3527
3528 buf_pos += n;
3529
3530 /* If the buffer is filling up, expand it. */
3531 if (buf_alloc < buf_pos * 2)
3532 {
3533 buf_alloc *= 2;
3534 buf = xrealloc (buf, buf_alloc);
3535 }
3536
3537 QUIT;
3538 }
3539 }
3540
3541 /* Read target file FILENAME. Store the result in *BUF_P and return
3542 the size of the transferred data. See the declaration in "target.h"
3543 for more information about the return value. */
3544
3545 LONGEST
3546 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3547 {
3548 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3549 }
3550
3551 /* Read target file FILENAME. The result is NUL-terminated and
3552 returned as a string, allocated using xmalloc. If an error occurs
3553 or the transfer is unsupported, NULL is returned. Empty objects
3554 are returned as allocated but empty strings. A warning is issued
3555 if the result contains any embedded NUL bytes. */
3556
3557 char *
3558 target_fileio_read_stralloc (const char *filename)
3559 {
3560 gdb_byte *buffer;
3561 char *bufstr;
3562 LONGEST i, transferred;
3563
3564 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3565 bufstr = (char *) buffer;
3566
3567 if (transferred < 0)
3568 return NULL;
3569
3570 if (transferred == 0)
3571 return xstrdup ("");
3572
3573 bufstr[transferred] = 0;
3574
3575 /* Check for embedded NUL bytes; but allow trailing NULs. */
3576 for (i = strlen (bufstr); i < transferred; i++)
3577 if (bufstr[i] != 0)
3578 {
3579 warning (_("target file %s "
3580 "contained unexpected null characters"),
3581 filename);
3582 break;
3583 }
3584
3585 return bufstr;
3586 }
3587
3588
3589 static int
3590 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3591 {
3592 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3593 }
3594
3595 static int
3596 default_watchpoint_addr_within_range (struct target_ops *target,
3597 CORE_ADDR addr,
3598 CORE_ADDR start, int length)
3599 {
3600 return addr >= start && addr < start + length;
3601 }
3602
3603 static struct gdbarch *
3604 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3605 {
3606 return target_gdbarch ();
3607 }
3608
3609 static int
3610 return_zero (void)
3611 {
3612 return 0;
3613 }
3614
3615 static int
3616 return_one (void)
3617 {
3618 return 1;
3619 }
3620
3621 static int
3622 return_minus_one (void)
3623 {
3624 return -1;
3625 }
3626
3627 /* Find a single runnable target in the stack and return it. If for
3628 some reason there is more than one, return NULL. */
3629
3630 struct target_ops *
3631 find_run_target (void)
3632 {
3633 struct target_ops **t;
3634 struct target_ops *runable = NULL;
3635 int count;
3636
3637 count = 0;
3638
3639 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3640 {
3641 if ((*t)->to_can_run && target_can_run (*t))
3642 {
3643 runable = *t;
3644 ++count;
3645 }
3646 }
3647
3648 return (count == 1 ? runable : NULL);
3649 }
3650
3651 /* Find the next target down the stack from the specified target. */
3654
3655 struct target_ops *
3656 find_target_beneath (struct target_ops *t)
3657 {
3658 return t->beneath;
3659 }
3660
3661 \f
3662 /* The inferior process has died. Long live the inferior! */
3663
3664 void
3665 generic_mourn_inferior (void)
3666 {
3667 ptid_t ptid;
3668
3669 ptid = inferior_ptid;
3670 inferior_ptid = null_ptid;
3671
3672 /* Mark breakpoints uninserted in case something tries to delete a
3673 breakpoint while we delete the inferior's threads (which would
3674 fail, since the inferior is long gone). */
3675 mark_breakpoints_out ();
3676
3677 if (!ptid_equal (ptid, null_ptid))
3678 {
3679 int pid = ptid_get_pid (ptid);
3680 exit_inferior (pid);
3681 }
3682
3683 /* Note this wipes step-resume breakpoints, so needs to be done
3684 after exit_inferior, which ends up referencing the step-resume
3685 breakpoints through clear_thread_inferior_resources. */
3686 breakpoint_init_inferior (inf_exited);
3687
3688 registers_changed ();
3689
3690 reopen_exec_file ();
3691 reinit_frame_cache ();
3692
3693 if (deprecated_detach_hook)
3694 deprecated_detach_hook ();
3695 }
3696 \f
3697 /* Convert a normal process ID to a string. Returns the string in a
3698 static buffer. */
3699
3700 char *
3701 normal_pid_to_str (ptid_t ptid)
3702 {
3703 static char buf[32];
3704
3705 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3706 return buf;
3707 }
3708
3709 static char *
3710 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3711 {
3712 return normal_pid_to_str (ptid);
3713 }
3714
3715 /* Error-catcher for target_find_memory_regions. */
3716 static int
3717 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3718 {
3719 error (_("Command not implemented for this target."));
3720 return 0;
3721 }
3722
3723 /* Error-catcher for target_make_corefile_notes. */
3724 static char *
3725 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3726 {
3727 error (_("Command not implemented for this target."));
3728 return NULL;
3729 }
3730
3731 /* Error-catcher for target_get_bookmark. */
3732 static gdb_byte *
3733 dummy_get_bookmark (char *ignore1, int ignore2)
3734 {
3735 tcomplain ();
3736 return NULL;
3737 }
3738
3739 /* Error-catcher for target_goto_bookmark. */
3740 static void
3741 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3742 {
3743 tcomplain ();
3744 }
3745
3746 /* Set up the handful of non-empty slots needed by the dummy target
3747 vector. */
3748
3749 static void
3750 init_dummy_target (void)
3751 {
3752 dummy_target.to_shortname = "None";
3753 dummy_target.to_longname = "None";
3754 dummy_target.to_doc = "";
3755 dummy_target.to_attach = find_default_attach;
3756 dummy_target.to_detach =
3757 (void (*)(struct target_ops *, char *, int))target_ignore;
3758 dummy_target.to_create_inferior = find_default_create_inferior;
3759 dummy_target.to_can_async_p = find_default_can_async_p;
3760 dummy_target.to_is_async_p = find_default_is_async_p;
3761 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3762 dummy_target.to_supports_disable_randomization
3763 = find_default_supports_disable_randomization;
3764 dummy_target.to_pid_to_str = dummy_pid_to_str;
3765 dummy_target.to_stratum = dummy_stratum;
3766 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3767 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3768 dummy_target.to_get_bookmark = dummy_get_bookmark;
3769 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3770 dummy_target.to_xfer_partial = default_xfer_partial;
3771 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3772 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3773 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3774 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3775 dummy_target.to_has_execution
3776 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3777 dummy_target.to_stopped_by_watchpoint = return_zero;
3778 dummy_target.to_stopped_data_address =
3779 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3780 dummy_target.to_magic = OPS_MAGIC;
3781 }
3782 \f
3783 static void
3784 debug_to_open (char *args, int from_tty)
3785 {
3786 debug_target.to_open (args, from_tty);
3787
3788 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3789 }
3790
3791 void
3792 target_close (struct target_ops *targ)
3793 {
3794 if (targ->to_xclose != NULL)
3795 targ->to_xclose (targ);
3796 else if (targ->to_close != NULL)
3797 targ->to_close ();
3798
3799 if (targetdebug)
3800 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3801 }
3802
3803 void
3804 target_attach (char *args, int from_tty)
3805 {
3806 struct target_ops *t;
3807
3808 for (t = current_target.beneath; t != NULL; t = t->beneath)
3809 {
3810 if (t->to_attach != NULL)
3811 {
3812 t->to_attach (t, args, from_tty);
3813 if (targetdebug)
3814 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3815 args, from_tty);
3816 return;
3817 }
3818 }
3819
3820 internal_error (__FILE__, __LINE__,
3821 _("could not find a target to attach"));
3822 }
3823
3824 int
3825 target_thread_alive (ptid_t ptid)
3826 {
3827 struct target_ops *t;
3828
3829 for (t = current_target.beneath; t != NULL; t = t->beneath)
3830 {
3831 if (t->to_thread_alive != NULL)
3832 {
3833 int retval;
3834
3835 retval = t->to_thread_alive (t, ptid);
3836 if (targetdebug)
3837 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3838 PIDGET (ptid), retval);
3839
3840 return retval;
3841 }
3842 }
3843
3844 return 0;
3845 }
3846
3847 void
3848 target_find_new_threads (void)
3849 {
3850 struct target_ops *t;
3851
3852 for (t = current_target.beneath; t != NULL; t = t->beneath)
3853 {
3854 if (t->to_find_new_threads != NULL)
3855 {
3856 t->to_find_new_threads (t);
3857 if (targetdebug)
3858 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3859
3860 return;
3861 }
3862 }
3863 }
3864
3865 void
3866 target_stop (ptid_t ptid)
3867 {
3868 if (!may_stop)
3869 {
3870 warning (_("May not interrupt or stop the target, ignoring attempt"));
3871 return;
3872 }
3873
3874 (*current_target.to_stop) (ptid);
3875 }
3876
3877 static void
3878 debug_to_post_attach (int pid)
3879 {
3880 debug_target.to_post_attach (pid);
3881
3882 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3883 }
3884
3885 /* Return a pretty printed form of target_waitstatus.
3886 Space for the result is malloc'd, caller must free. */
3887
3888 char *
3889 target_waitstatus_to_string (const struct target_waitstatus *ws)
3890 {
3891 const char *kind_str = "status->kind = ";
3892
3893 switch (ws->kind)
3894 {
3895 case TARGET_WAITKIND_EXITED:
3896 return xstrprintf ("%sexited, status = %d",
3897 kind_str, ws->value.integer);
3898 case TARGET_WAITKIND_STOPPED:
3899 return xstrprintf ("%sstopped, signal = %s",
3900 kind_str, gdb_signal_to_name (ws->value.sig));
3901 case TARGET_WAITKIND_SIGNALLED:
3902 return xstrprintf ("%ssignalled, signal = %s",
3903 kind_str, gdb_signal_to_name (ws->value.sig));
3904 case TARGET_WAITKIND_LOADED:
3905 return xstrprintf ("%sloaded", kind_str);
3906 case TARGET_WAITKIND_FORKED:
3907 return xstrprintf ("%sforked", kind_str);
3908 case TARGET_WAITKIND_VFORKED:
3909 return xstrprintf ("%svforked", kind_str);
3910 case TARGET_WAITKIND_EXECD:
3911 return xstrprintf ("%sexecd", kind_str);
3912 case TARGET_WAITKIND_VFORK_DONE:
3913 return xstrprintf ("%svfork-done", kind_str);
3914 case TARGET_WAITKIND_SYSCALL_ENTRY:
3915 return xstrprintf ("%sentered syscall", kind_str);
3916 case TARGET_WAITKIND_SYSCALL_RETURN:
3917 return xstrprintf ("%sexited syscall", kind_str);
3918 case TARGET_WAITKIND_SPURIOUS:
3919 return xstrprintf ("%sspurious", kind_str);
3920 case TARGET_WAITKIND_IGNORE:
3921 return xstrprintf ("%signore", kind_str);
3922 case TARGET_WAITKIND_NO_HISTORY:
3923 return xstrprintf ("%sno-history", kind_str);
3924 case TARGET_WAITKIND_NO_RESUMED:
3925 return xstrprintf ("%sno-resumed", kind_str);
3926 default:
3927 return xstrprintf ("%sunknown???", kind_str);
3928 }
3929 }
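/* Illustrative usage sketch (editorial, not part of the original file):
   the caller owns the returned string and frees it with xfree, e.g.

     char *status_str = target_waitstatus_to_string (&ws);

     fprintf_unfiltered (gdb_stdlog, "wait status: %s\n", status_str);
     xfree (status_str);

   where WS is a struct target_waitstatus previously filled in by a
   target's to_wait method.  */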
3930
3931 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3932    result.  The incoming LIST argument is released.  */
3933
3934 static char *
3935 str_comma_list_concat_elem (char *list, const char *elem)
3936 {
3937 if (list == NULL)
3938 return xstrdup (elem);
3939 else
3940 return reconcat (list, list, ", ", elem, (char *) NULL);
3941 }
3942
3943 /* Helper for target_options_to_string.  If OPT is present in
3944    TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3945    Returns the new resulting string.  OPT is removed from
3946    TARGET_OPTIONS.  */
3947
3948 static char *
3949 do_option (int *target_options, char *ret,
3950 int opt, char *opt_str)
3951 {
3952 if ((*target_options & opt) != 0)
3953 {
3954 ret = str_comma_list_concat_elem (ret, opt_str);
3955 *target_options &= ~opt;
3956 }
3957
3958 return ret;
3959 }
3960
3961 char *
3962 target_options_to_string (int target_options)
3963 {
3964 char *ret = NULL;
3965
3966 #define DO_TARG_OPTION(OPT) \
3967 ret = do_option (&target_options, ret, OPT, #OPT)
3968
3969 DO_TARG_OPTION (TARGET_WNOHANG);
3970
3971 if (target_options != 0)
3972 ret = str_comma_list_concat_elem (ret, "unknown???");
3973
3974 if (ret == NULL)
3975 ret = xstrdup ("");
3976 return ret;
3977 }
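/* Illustrative usage sketch (editorial, not part of the original file):

     char *opts = target_options_to_string (TARGET_WNOHANG);

     fprintf_unfiltered (gdb_stdlog, "options = [%s]\n", opts);
     xfree (opts);

   logs "options = [TARGET_WNOHANG]"; option bits this function does not
   know about are reported as "unknown???", and an empty option set
   yields an empty string.  */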
3978
3979 static void
3980 debug_print_register (const char * func,
3981 struct regcache *regcache, int regno)
3982 {
3983 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3984
3985 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3986 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3987 && gdbarch_register_name (gdbarch, regno) != NULL
3988 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3989 fprintf_unfiltered (gdb_stdlog, "(%s)",
3990 gdbarch_register_name (gdbarch, regno));
3991 else
3992 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3993 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3994 {
3995 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3996 int i, size = register_size (gdbarch, regno);
3997 gdb_byte buf[MAX_REGISTER_SIZE];
3998
3999 regcache_raw_collect (regcache, regno, buf);
4000 fprintf_unfiltered (gdb_stdlog, " = ");
4001 for (i = 0; i < size; i++)
4002 {
4003 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4004 }
4005 if (size <= sizeof (LONGEST))
4006 {
4007 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
4008
4009 fprintf_unfiltered (gdb_stdlog, " %s %s",
4010 core_addr_to_string_nz (val), plongest (val));
4011 }
4012 }
4013 fprintf_unfiltered (gdb_stdlog, "\n");
4014 }
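/* Editorial note (not part of the original file): with "set debug
   target 1" the helper above produces log lines of the form

     target_fetch_registers (pc) = 2d05400000000000 0x40052d 4195629

   i.e. the raw register bytes in target byte order, followed by the
   value rendered as an address and as a decimal number when the
   register fits in a LONGEST.  (The register name and values shown are
   made up for illustration.)  */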
4015
4016 void
4017 target_fetch_registers (struct regcache *regcache, int regno)
4018 {
4019 struct target_ops *t;
4020
4021 for (t = current_target.beneath; t != NULL; t = t->beneath)
4022 {
4023 if (t->to_fetch_registers != NULL)
4024 {
4025 t->to_fetch_registers (t, regcache, regno);
4026 if (targetdebug)
4027 debug_print_register ("target_fetch_registers", regcache, regno);
4028 return;
4029 }
4030 }
4031 }
4032
4033 void
4034 target_store_registers (struct regcache *regcache, int regno)
4035 {
4036 struct target_ops *t;
4037
4038 if (!may_write_registers)
4039 error (_("Writing to registers is not allowed (regno %d)"), regno);
4040
4041 for (t = current_target.beneath; t != NULL; t = t->beneath)
4042 {
4043 if (t->to_store_registers != NULL)
4044 {
4045 t->to_store_registers (t, regcache, regno);
4046 if (targetdebug)
4047 {
4048 debug_print_register ("target_store_registers", regcache, regno);
4049 }
4050 return;
4051 }
4052 }
4053
4054 noprocess ();
4055 }
4056
4057 int
4058 target_core_of_thread (ptid_t ptid)
4059 {
4060 struct target_ops *t;
4061
4062 for (t = current_target.beneath; t != NULL; t = t->beneath)
4063 {
4064 if (t->to_core_of_thread != NULL)
4065 {
4066 int retval = t->to_core_of_thread (t, ptid);
4067
4068 if (targetdebug)
4069 fprintf_unfiltered (gdb_stdlog,
4070 "target_core_of_thread (%d) = %d\n",
4071 PIDGET (ptid), retval);
4072 return retval;
4073 }
4074 }
4075
4076 return -1;
4077 }
4078
4079 int
4080 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4081 {
4082 struct target_ops *t;
4083
4084 for (t = current_target.beneath; t != NULL; t = t->beneath)
4085 {
4086 if (t->to_verify_memory != NULL)
4087 {
4088 int retval = t->to_verify_memory (t, data, memaddr, size);
4089
4090 if (targetdebug)
4091 fprintf_unfiltered (gdb_stdlog,
4092 "target_verify_memory (%s, %s) = %d\n",
4093 paddress (target_gdbarch (), memaddr),
4094 pulongest (size),
4095 retval);
4096 return retval;
4097 }
4098 }
4099
4100 tcomplain ();
4101 }
4102
4103 /* The documentation for this function is in its prototype declaration in
4104 target.h. */
4105
4106 int
4107 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4108 {
4109 struct target_ops *t;
4110
4111 for (t = current_target.beneath; t != NULL; t = t->beneath)
4112 if (t->to_insert_mask_watchpoint != NULL)
4113 {
4114 int ret;
4115
4116 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4117
4118 if (targetdebug)
4119 fprintf_unfiltered (gdb_stdlog, "\
4120 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4121 core_addr_to_string (addr),
4122 core_addr_to_string (mask), rw, ret);
4123
4124 return ret;
4125 }
4126
4127 return 1;
4128 }
4129
4130 /* The documentation for this function is in its prototype declaration in
4131 target.h. */
4132
4133 int
4134 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4135 {
4136 struct target_ops *t;
4137
4138 for (t = current_target.beneath; t != NULL; t = t->beneath)
4139 if (t->to_remove_mask_watchpoint != NULL)
4140 {
4141 int ret;
4142
4143 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4144
4145 if (targetdebug)
4146 fprintf_unfiltered (gdb_stdlog, "\
4147 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4148 core_addr_to_string (addr),
4149 core_addr_to_string (mask), rw, ret);
4150
4151 return ret;
4152 }
4153
4154 return 1;
4155 }
4156
4157 /* The documentation for this function is in its prototype declaration
4158 in target.h. */
4159
4160 int
4161 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4162 {
4163 struct target_ops *t;
4164
4165 for (t = current_target.beneath; t != NULL; t = t->beneath)
4166 if (t->to_masked_watch_num_registers != NULL)
4167 return t->to_masked_watch_num_registers (t, addr, mask);
4168
4169 return -1;
4170 }
4171
4172 /* The documentation for this function is in its prototype declaration
4173 in target.h. */
4174
4175 int
4176 target_ranged_break_num_registers (void)
4177 {
4178 struct target_ops *t;
4179
4180 for (t = current_target.beneath; t != NULL; t = t->beneath)
4181 if (t->to_ranged_break_num_registers != NULL)
4182 return t->to_ranged_break_num_registers (t);
4183
4184 return -1;
4185 }
4186
4187 /* See target.h. */
4188
4189 int
4190 target_supports_btrace (void)
4191 {
4192 struct target_ops *t;
4193
4194 for (t = current_target.beneath; t != NULL; t = t->beneath)
4195 if (t->to_supports_btrace != NULL)
4196 return t->to_supports_btrace ();
4197
4198 return 0;
4199 }
4200
4201 /* See target.h. */
4202
4203 struct btrace_target_info *
4204 target_enable_btrace (ptid_t ptid)
4205 {
4206 struct target_ops *t;
4207
4208 for (t = current_target.beneath; t != NULL; t = t->beneath)
4209 if (t->to_enable_btrace != NULL)
4210 return t->to_enable_btrace (ptid);
4211
4212 tcomplain ();
4213 return NULL;
4214 }
4215
4216 /* See target.h. */
4217
4218 void
4219 target_disable_btrace (struct btrace_target_info *btinfo)
4220 {
4221 struct target_ops *t;
4222
4223 for (t = current_target.beneath; t != NULL; t = t->beneath)
4224 if (t->to_disable_btrace != NULL)
4225 return t->to_disable_btrace (btinfo);
4226
4227 tcomplain ();
4228 }
4229
4230 /* See target.h. */
4231
4232 void
4233 target_teardown_btrace (struct btrace_target_info *btinfo)
4234 {
4235 struct target_ops *t;
4236
4237 for (t = current_target.beneath; t != NULL; t = t->beneath)
4238 if (t->to_teardown_btrace != NULL)
4239 return t->to_teardown_btrace (btinfo);
4240
4241 tcomplain ();
4242 }
4243
4244 /* See target.h. */
4245
4246 VEC (btrace_block_s) *
4247 target_read_btrace (struct btrace_target_info *btinfo,
4248 enum btrace_read_type type)
4249 {
4250 struct target_ops *t;
4251
4252 for (t = current_target.beneath; t != NULL; t = t->beneath)
4253 if (t->to_read_btrace != NULL)
4254 return t->to_read_btrace (btinfo, type);
4255
4256 tcomplain ();
4257 return NULL;
4258 }
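/* Illustrative sketch (editorial, not part of the original file) of the
   typical call sequence for the branch-trace entry points above:

     if (target_supports_btrace ())
       {
         struct btrace_target_info *tinfo = target_enable_btrace (inferior_ptid);
         VEC (btrace_block_s) *trace = target_read_btrace (tinfo, BTRACE_READ_NEW);

         ... consume TRACE ...

         target_disable_btrace (tinfo);
       }

   BTRACE_READ_NEW is assumed here to be one of the enum
   btrace_read_type values; the exact reader type names live in
   btrace.h.  */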
4259
4260 /* See target.h. */
4261
4262 void
4263 target_stop_recording (void)
4264 {
4265 struct target_ops *t;
4266
4267 for (t = current_target.beneath; t != NULL; t = t->beneath)
4268 if (t->to_stop_recording != NULL)
4269 {
4270 t->to_stop_recording ();
4271 return;
4272 }
4273
4274 /* This is optional. */
4275 }
4276
4277 /* See target.h. */
4278
4279 void
4280 target_info_record (void)
4281 {
4282 struct target_ops *t;
4283
4284 for (t = current_target.beneath; t != NULL; t = t->beneath)
4285 if (t->to_info_record != NULL)
4286 {
4287 t->to_info_record ();
4288 return;
4289 }
4290
4291 tcomplain ();
4292 }
4293
4294 /* See target.h. */
4295
4296 void
4297 target_save_record (const char *filename)
4298 {
4299 struct target_ops *t;
4300
4301 for (t = current_target.beneath; t != NULL; t = t->beneath)
4302 if (t->to_save_record != NULL)
4303 {
4304 t->to_save_record (filename);
4305 return;
4306 }
4307
4308 tcomplain ();
4309 }
4310
4311 /* See target.h. */
4312
4313 int
4314 target_supports_delete_record (void)
4315 {
4316 struct target_ops *t;
4317
4318 for (t = current_target.beneath; t != NULL; t = t->beneath)
4319 if (t->to_delete_record != NULL)
4320 return 1;
4321
4322 return 0;
4323 }
4324
4325 /* See target.h. */
4326
4327 void
4328 target_delete_record (void)
4329 {
4330 struct target_ops *t;
4331
4332 for (t = current_target.beneath; t != NULL; t = t->beneath)
4333 if (t->to_delete_record != NULL)
4334 {
4335 t->to_delete_record ();
4336 return;
4337 }
4338
4339 tcomplain ();
4340 }
4341
4342 /* See target.h. */
4343
4344 int
4345 target_record_is_replaying (void)
4346 {
4347 struct target_ops *t;
4348
4349 for (t = current_target.beneath; t != NULL; t = t->beneath)
4350 if (t->to_record_is_replaying != NULL)
4351 return t->to_record_is_replaying ();
4352
4353 return 0;
4354 }
4355
4356 /* See target.h. */
4357
4358 void
4359 target_goto_record_begin (void)
4360 {
4361 struct target_ops *t;
4362
4363 for (t = current_target.beneath; t != NULL; t = t->beneath)
4364 if (t->to_goto_record_begin != NULL)
4365 {
4366 t->to_goto_record_begin ();
4367 return;
4368 }
4369
4370 tcomplain ();
4371 }
4372
4373 /* See target.h. */
4374
4375 void
4376 target_goto_record_end (void)
4377 {
4378 struct target_ops *t;
4379
4380 for (t = current_target.beneath; t != NULL; t = t->beneath)
4381 if (t->to_goto_record_end != NULL)
4382 {
4383 t->to_goto_record_end ();
4384 return;
4385 }
4386
4387 tcomplain ();
4388 }
4389
4390 /* See target.h. */
4391
4392 void
4393 target_goto_record (ULONGEST insn)
4394 {
4395 struct target_ops *t;
4396
4397 for (t = current_target.beneath; t != NULL; t = t->beneath)
4398 if (t->to_goto_record != NULL)
4399 {
4400 t->to_goto_record (insn);
4401 return;
4402 }
4403
4404 tcomplain ();
4405 }
4406
4407 /* See target.h. */
4408
4409 void
4410 target_insn_history (int size, int flags)
4411 {
4412 struct target_ops *t;
4413
4414 for (t = current_target.beneath; t != NULL; t = t->beneath)
4415 if (t->to_insn_history != NULL)
4416 {
4417 t->to_insn_history (size, flags);
4418 return;
4419 }
4420
4421 tcomplain ();
4422 }
4423
4424 /* See target.h. */
4425
4426 void
4427 target_insn_history_from (ULONGEST from, int size, int flags)
4428 {
4429 struct target_ops *t;
4430
4431 for (t = current_target.beneath; t != NULL; t = t->beneath)
4432 if (t->to_insn_history_from != NULL)
4433 {
4434 t->to_insn_history_from (from, size, flags);
4435 return;
4436 }
4437
4438 tcomplain ();
4439 }
4440
4441 /* See target.h. */
4442
4443 void
4444 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4445 {
4446 struct target_ops *t;
4447
4448 for (t = current_target.beneath; t != NULL; t = t->beneath)
4449 if (t->to_insn_history_range != NULL)
4450 {
4451 t->to_insn_history_range (begin, end, flags);
4452 return;
4453 }
4454
4455 tcomplain ();
4456 }
4457
4458 /* See target.h. */
4459
4460 void
4461 target_call_history (int size, int flags)
4462 {
4463 struct target_ops *t;
4464
4465 for (t = current_target.beneath; t != NULL; t = t->beneath)
4466 if (t->to_call_history != NULL)
4467 {
4468 t->to_call_history (size, flags);
4469 return;
4470 }
4471
4472 tcomplain ();
4473 }
4474
4475 /* See target.h. */
4476
4477 void
4478 target_call_history_from (ULONGEST begin, int size, int flags)
4479 {
4480 struct target_ops *t;
4481
4482 for (t = current_target.beneath; t != NULL; t = t->beneath)
4483 if (t->to_call_history_from != NULL)
4484 {
4485 t->to_call_history_from (begin, size, flags);
4486 return;
4487 }
4488
4489 tcomplain ();
4490 }
4491
4492 /* See target.h. */
4493
4494 void
4495 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4496 {
4497 struct target_ops *t;
4498
4499 for (t = current_target.beneath; t != NULL; t = t->beneath)
4500 if (t->to_call_history_range != NULL)
4501 {
4502 t->to_call_history_range (begin, end, flags);
4503 return;
4504 }
4505
4506 tcomplain ();
4507 }
4508
4509 static void
4510 debug_to_prepare_to_store (struct regcache *regcache)
4511 {
4512 debug_target.to_prepare_to_store (regcache);
4513
4514 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4515 }
4516
4517 static int
4518 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4519 int write, struct mem_attrib *attrib,
4520 struct target_ops *target)
4521 {
4522 int retval;
4523
4524 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4525 attrib, target);
4526
4527 fprintf_unfiltered (gdb_stdlog,
4528 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4529 paddress (target_gdbarch (), memaddr), len,
4530 write ? "write" : "read", retval);
4531
4532 if (retval > 0)
4533 {
4534 int i;
4535
4536 fputs_unfiltered (", bytes =", gdb_stdlog);
4537 for (i = 0; i < retval; i++)
4538 {
4539 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4540 {
4541 if (targetdebug < 2 && i > 0)
4542 {
4543 fprintf_unfiltered (gdb_stdlog, " ...");
4544 break;
4545 }
4546 fprintf_unfiltered (gdb_stdlog, "\n");
4547 }
4548
4549 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4550 }
4551 }
4552
4553 fputc_unfiltered ('\n', gdb_stdlog);
4554
4555 return retval;
4556 }
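/* Editorial note (not part of the original file): a successful 4-byte
   read at "set debug target" level 2 or higher logs roughly

     target_xfer_memory (0x601040, xxx, 4, read, xxx) = 4, bytes =
      de ad be ef

   (address and bytes made up); the line breaks in the byte dump follow
   16-byte alignment of the host buffer, and at debug level 1 longer
   dumps are cut short with " ...".  */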
4557
4558 static void
4559 debug_to_files_info (struct target_ops *target)
4560 {
4561 debug_target.to_files_info (target);
4562
4563 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4564 }
4565
4566 static int
4567 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4568 struct bp_target_info *bp_tgt)
4569 {
4570 int retval;
4571
4572 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4573
4574 fprintf_unfiltered (gdb_stdlog,
4575 "target_insert_breakpoint (%s, xxx) = %ld\n",
4576 core_addr_to_string (bp_tgt->placed_address),
4577 (unsigned long) retval);
4578 return retval;
4579 }
4580
4581 static int
4582 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4583 struct bp_target_info *bp_tgt)
4584 {
4585 int retval;
4586
4587 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4588
4589 fprintf_unfiltered (gdb_stdlog,
4590 "target_remove_breakpoint (%s, xxx) = %ld\n",
4591 core_addr_to_string (bp_tgt->placed_address),
4592 (unsigned long) retval);
4593 return retval;
4594 }
4595
4596 static int
4597 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4598 {
4599 int retval;
4600
4601 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4602
4603 fprintf_unfiltered (gdb_stdlog,
4604 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4605 (unsigned long) type,
4606 (unsigned long) cnt,
4607 (unsigned long) from_tty,
4608 (unsigned long) retval);
4609 return retval;
4610 }
4611
4612 static int
4613 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4614 {
4615 CORE_ADDR retval;
4616
4617 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4618
4619 fprintf_unfiltered (gdb_stdlog,
4620 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4621 core_addr_to_string (addr), (unsigned long) len,
4622 core_addr_to_string (retval));
4623 return retval;
4624 }
4625
4626 static int
4627 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4628 struct expression *cond)
4629 {
4630 int retval;
4631
4632 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4633 rw, cond);
4634
4635 fprintf_unfiltered (gdb_stdlog,
4636 "target_can_accel_watchpoint_condition "
4637 "(%s, %d, %d, %s) = %ld\n",
4638 core_addr_to_string (addr), len, rw,
4639 host_address_to_string (cond), (unsigned long) retval);
4640 return retval;
4641 }
4642
4643 static int
4644 debug_to_stopped_by_watchpoint (void)
4645 {
4646 int retval;
4647
4648 retval = debug_target.to_stopped_by_watchpoint ();
4649
4650 fprintf_unfiltered (gdb_stdlog,
4651 "target_stopped_by_watchpoint () = %ld\n",
4652 (unsigned long) retval);
4653 return retval;
4654 }
4655
4656 static int
4657 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4658 {
4659 int retval;
4660
4661 retval = debug_target.to_stopped_data_address (target, addr);
4662
4663 fprintf_unfiltered (gdb_stdlog,
4664 "target_stopped_data_address ([%s]) = %ld\n",
4665 core_addr_to_string (*addr),
4666 (unsigned long)retval);
4667 return retval;
4668 }
4669
4670 static int
4671 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4672 CORE_ADDR addr,
4673 CORE_ADDR start, int length)
4674 {
4675 int retval;
4676
4677 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4678 start, length);
4679
4680 fprintf_filtered (gdb_stdlog,
4681 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4682 core_addr_to_string (addr), core_addr_to_string (start),
4683 length, retval);
4684 return retval;
4685 }
4686
4687 static int
4688 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4689 struct bp_target_info *bp_tgt)
4690 {
4691 int retval;
4692
4693 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4694
4695 fprintf_unfiltered (gdb_stdlog,
4696 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4697 core_addr_to_string (bp_tgt->placed_address),
4698 (unsigned long) retval);
4699 return retval;
4700 }
4701
4702 static int
4703 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4704 struct bp_target_info *bp_tgt)
4705 {
4706 int retval;
4707
4708 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4709
4710 fprintf_unfiltered (gdb_stdlog,
4711 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4712 core_addr_to_string (bp_tgt->placed_address),
4713 (unsigned long) retval);
4714 return retval;
4715 }
4716
4717 static int
4718 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4719 struct expression *cond)
4720 {
4721 int retval;
4722
4723 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4724
4725 fprintf_unfiltered (gdb_stdlog,
4726 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4727 core_addr_to_string (addr), len, type,
4728 host_address_to_string (cond), (unsigned long) retval);
4729 return retval;
4730 }
4731
4732 static int
4733 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4734 struct expression *cond)
4735 {
4736 int retval;
4737
4738 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4739
4740 fprintf_unfiltered (gdb_stdlog,
4741 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4742 core_addr_to_string (addr), len, type,
4743 host_address_to_string (cond), (unsigned long) retval);
4744 return retval;
4745 }
4746
4747 static void
4748 debug_to_terminal_init (void)
4749 {
4750 debug_target.to_terminal_init ();
4751
4752 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4753 }
4754
4755 static void
4756 debug_to_terminal_inferior (void)
4757 {
4758 debug_target.to_terminal_inferior ();
4759
4760 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4761 }
4762
4763 static void
4764 debug_to_terminal_ours_for_output (void)
4765 {
4766 debug_target.to_terminal_ours_for_output ();
4767
4768 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4769 }
4770
4771 static void
4772 debug_to_terminal_ours (void)
4773 {
4774 debug_target.to_terminal_ours ();
4775
4776 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4777 }
4778
4779 static void
4780 debug_to_terminal_save_ours (void)
4781 {
4782 debug_target.to_terminal_save_ours ();
4783
4784 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4785 }
4786
4787 static void
4788 debug_to_terminal_info (const char *arg, int from_tty)
4789 {
4790 debug_target.to_terminal_info (arg, from_tty);
4791
4792 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4793 from_tty);
4794 }
4795
4796 static void
4797 debug_to_load (char *args, int from_tty)
4798 {
4799 debug_target.to_load (args, from_tty);
4800
4801 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4802 }
4803
4804 static void
4805 debug_to_post_startup_inferior (ptid_t ptid)
4806 {
4807 debug_target.to_post_startup_inferior (ptid);
4808
4809 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4810 PIDGET (ptid));
4811 }
4812
4813 static int
4814 debug_to_insert_fork_catchpoint (int pid)
4815 {
4816 int retval;
4817
4818 retval = debug_target.to_insert_fork_catchpoint (pid);
4819
4820 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4821 pid, retval);
4822
4823 return retval;
4824 }
4825
4826 static int
4827 debug_to_remove_fork_catchpoint (int pid)
4828 {
4829 int retval;
4830
4831 retval = debug_target.to_remove_fork_catchpoint (pid);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4834 pid, retval);
4835
4836 return retval;
4837 }
4838
4839 static int
4840 debug_to_insert_vfork_catchpoint (int pid)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_insert_vfork_catchpoint (pid);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4847 pid, retval);
4848
4849 return retval;
4850 }
4851
4852 static int
4853 debug_to_remove_vfork_catchpoint (int pid)
4854 {
4855 int retval;
4856
4857 retval = debug_target.to_remove_vfork_catchpoint (pid);
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4860 pid, retval);
4861
4862 return retval;
4863 }
4864
4865 static int
4866 debug_to_insert_exec_catchpoint (int pid)
4867 {
4868 int retval;
4869
4870 retval = debug_target.to_insert_exec_catchpoint (pid);
4871
4872 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4873 pid, retval);
4874
4875 return retval;
4876 }
4877
4878 static int
4879 debug_to_remove_exec_catchpoint (int pid)
4880 {
4881 int retval;
4882
4883 retval = debug_target.to_remove_exec_catchpoint (pid);
4884
4885 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4886 pid, retval);
4887
4888 return retval;
4889 }
4890
4891 static int
4892 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4893 {
4894 int has_exited;
4895
4896 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4897
4898 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4899 pid, wait_status, *exit_status, has_exited);
4900
4901 return has_exited;
4902 }
4903
4904 static int
4905 debug_to_can_run (void)
4906 {
4907 int retval;
4908
4909 retval = debug_target.to_can_run ();
4910
4911 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4912
4913 return retval;
4914 }
4915
4916 static struct gdbarch *
4917 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4918 {
4919 struct gdbarch *retval;
4920
4921 retval = debug_target.to_thread_architecture (ops, ptid);
4922
4923 fprintf_unfiltered (gdb_stdlog,
4924 "target_thread_architecture (%s) = %s [%s]\n",
4925 target_pid_to_str (ptid),
4926 host_address_to_string (retval),
4927 gdbarch_bfd_arch_info (retval)->printable_name);
4928 return retval;
4929 }
4930
4931 static void
4932 debug_to_stop (ptid_t ptid)
4933 {
4934 debug_target.to_stop (ptid);
4935
4936 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4937 target_pid_to_str (ptid));
4938 }
4939
4940 static void
4941 debug_to_rcmd (char *command,
4942 struct ui_file *outbuf)
4943 {
4944 debug_target.to_rcmd (command, outbuf);
4945 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4946 }
4947
4948 static char *
4949 debug_to_pid_to_exec_file (int pid)
4950 {
4951 char *exec_file;
4952
4953 exec_file = debug_target.to_pid_to_exec_file (pid);
4954
4955 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4956 pid, exec_file);
4957
4958 return exec_file;
4959 }
4960
4961 static void
4962 setup_target_debug (void)
4963 {
4964 memcpy (&debug_target, &current_target, sizeof debug_target);
4965
4966 current_target.to_open = debug_to_open;
4967 current_target.to_post_attach = debug_to_post_attach;
4968 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4969 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4970 current_target.to_files_info = debug_to_files_info;
4971 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4972 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4973 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4974 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4975 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4976 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4977 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4978 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4979 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4980 current_target.to_watchpoint_addr_within_range
4981 = debug_to_watchpoint_addr_within_range;
4982 current_target.to_region_ok_for_hw_watchpoint
4983 = debug_to_region_ok_for_hw_watchpoint;
4984 current_target.to_can_accel_watchpoint_condition
4985 = debug_to_can_accel_watchpoint_condition;
4986 current_target.to_terminal_init = debug_to_terminal_init;
4987 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4988 current_target.to_terminal_ours_for_output
4989 = debug_to_terminal_ours_for_output;
4990 current_target.to_terminal_ours = debug_to_terminal_ours;
4991 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4992 current_target.to_terminal_info = debug_to_terminal_info;
4993 current_target.to_load = debug_to_load;
4994 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4995 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4996 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4997 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4998 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4999 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5000 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5001 current_target.to_has_exited = debug_to_has_exited;
5002 current_target.to_can_run = debug_to_can_run;
5003 current_target.to_stop = debug_to_stop;
5004 current_target.to_rcmd = debug_to_rcmd;
5005 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5006 current_target.to_thread_architecture = debug_to_thread_architecture;
5007 }
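/* Editorial note (not part of the original file): setup_target_debug
   snapshots the fully merged current_target into debug_target and then
   interposes the debug_to_* wrappers above; each wrapper calls the
   saved method from debug_target and logs the call and its result to
   gdb_stdlog.  */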
5008 \f
5009
5010 static char targ_desc[] =
5011 "Names of targets and files being debugged.\nShows the entire \
5012 stack of targets currently in use (including the exec-file,\n\
5013 core-file, and process, if any), as well as the symbol file name.";
5014
5015 static void
5016 do_monitor_command (char *cmd,
5017 int from_tty)
5018 {
5019 if ((current_target.to_rcmd
5020 == (void (*) (char *, struct ui_file *)) tcomplain)
5021 || (current_target.to_rcmd == debug_to_rcmd
5022 && (debug_target.to_rcmd
5023 == (void (*) (char *, struct ui_file *)) tcomplain)))
5024 error (_("\"monitor\" command not supported by this target."));
5025 target_rcmd (cmd, gdb_stdtarg);
5026 }
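/* Illustrative usage sketch (editorial, not part of the original file):

     (gdb) monitor reset

   forwards "reset" to the target via target_rcmd and prints the
   target's reply on gdb_stdtarg; a target whose to_rcmd is still the
   default tcomplain stub instead reports that "monitor" is not
   supported.  */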
5027
5028 /* Print the name of each layer of our target stack. */
5029
5030 static void
5031 maintenance_print_target_stack (char *cmd, int from_tty)
5032 {
5033 struct target_ops *t;
5034
5035 printf_filtered (_("The current target stack is:\n"));
5036
5037 for (t = target_stack; t != NULL; t = t->beneath)
5038 {
5039 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5040 }
5041 }
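/* Illustrative output sketch (editorial, not part of the original
   file): against a remote stub the command might print

     (gdb) maintenance print target-stack
     The current target stack is:
       - remote (Remote serial target in gdb-specific protocol)
       - exec (Local exec file)
       - None (None)

   using the to_shortname/to_longname pair of each stacked target; the
   exact layers depend on the session.  */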
5042
5043 /* Controls whether async mode is permitted. */
5044 int target_async_permitted = 0;
5045
5046 /* The set command writes to this variable.  If the inferior is
5047    executing, target_async_permitted is *not* updated.  */
5048 static int target_async_permitted_1 = 0;
5049
5050 static void
5051 set_target_async_command (char *args, int from_tty,
5052 struct cmd_list_element *c)
5053 {
5054 if (have_live_inferiors ())
5055 {
5056 target_async_permitted_1 = target_async_permitted;
5057 error (_("Cannot change this setting while the inferior is running."));
5058 }
5059
5060 target_async_permitted = target_async_permitted_1;
5061 }
5062
5063 static void
5064 show_target_async_command (struct ui_file *file, int from_tty,
5065 struct cmd_list_element *c,
5066 const char *value)
5067 {
5068 fprintf_filtered (file,
5069 _("Controlling the inferior in "
5070 "asynchronous mode is %s.\n"), value);
5071 }
5072
5073 /* Temporary copies of permission settings. */
5074
5075 static int may_write_registers_1 = 1;
5076 static int may_write_memory_1 = 1;
5077 static int may_insert_breakpoints_1 = 1;
5078 static int may_insert_tracepoints_1 = 1;
5079 static int may_insert_fast_tracepoints_1 = 1;
5080 static int may_stop_1 = 1;
5081
5082 /* Make the user-set values match the real values again. */
5083
5084 void
5085 update_target_permissions (void)
5086 {
5087 may_write_registers_1 = may_write_registers;
5088 may_write_memory_1 = may_write_memory;
5089 may_insert_breakpoints_1 = may_insert_breakpoints;
5090 may_insert_tracepoints_1 = may_insert_tracepoints;
5091 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5092 may_stop_1 = may_stop;
5093 }
5094
5095 /* This one function handles (most of) the permission flags in the
5096    same way.  */
5097
5098 static void
5099 set_target_permissions (char *args, int from_tty,
5100 struct cmd_list_element *c)
5101 {
5102 if (target_has_execution)
5103 {
5104 update_target_permissions ();
5105 error (_("Cannot change this setting while the inferior is running."));
5106 }
5107
5108 /* Make the real values match the user-changed values. */
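  /* Editorial note (not part of the original file): may_write_memory is
     not copied here; set_write_memory_permission below handles it
     separately so that memory write permission can be changed
     independently of observer mode.  */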
5109 may_write_registers = may_write_registers_1;
5110 may_insert_breakpoints = may_insert_breakpoints_1;
5111 may_insert_tracepoints = may_insert_tracepoints_1;
5112 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5113 may_stop = may_stop_1;
5114 update_observer_mode ();
5115 }
5116
5117 /* Set memory write permission independently of observer mode. */
5118
5119 static void
5120 set_write_memory_permission (char *args, int from_tty,
5121 struct cmd_list_element *c)
5122 {
5123 /* Make the real values match the user-changed values. */
5124 may_write_memory = may_write_memory_1;
5125 update_observer_mode ();
5126 }
5127
5128
5129 void
5130 initialize_targets (void)
5131 {
5132 init_dummy_target ();
5133 push_target (&dummy_target);
5134
5135 add_info ("target", target_info, targ_desc);
5136 add_info ("files", target_info, targ_desc);
5137
5138 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5139 Set target debugging."), _("\
5140 Show target debugging."), _("\
5141 When non-zero, target debugging is enabled. Higher numbers are more\n\
5142 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5143 command."),
5144 NULL,
5145 show_targetdebug,
5146 &setdebuglist, &showdebuglist);
5147
5148 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5149 &trust_readonly, _("\
5150 Set mode for reading from readonly sections."), _("\
5151 Show mode for reading from readonly sections."), _("\
5152 When this mode is on, memory reads from readonly sections (such as .text)\n\
5153 will be read from the object file instead of from the target. This will\n\
5154 result in significant performance improvement for remote targets."),
5155 NULL,
5156 show_trust_readonly,
5157 &setlist, &showlist);
5158
5159 add_com ("monitor", class_obscure, do_monitor_command,
5160 _("Send a command to the remote monitor (remote targets only)."));
5161
5162 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5163 _("Print the name of each layer of the internal target stack."),
5164 &maintenanceprintlist);
5165
5166 add_setshow_boolean_cmd ("target-async", no_class,
5167 &target_async_permitted_1, _("\
5168 Set whether gdb controls the inferior in asynchronous mode."), _("\
5169 Show whether gdb controls the inferior in asynchronous mode."), _("\
5170 Tells gdb whether to control the inferior in asynchronous mode."),
5171 set_target_async_command,
5172 show_target_async_command,
5173 &setlist,
5174 &showlist);
5175
5176 add_setshow_boolean_cmd ("stack-cache", class_support,
5177 &stack_cache_enabled_p_1, _("\
5178 Set cache use for stack access."), _("\
5179 Show cache use for stack access."), _("\
5180 When on, use the data cache for all stack access, regardless of any\n\
5181 configured memory regions. This improves remote performance significantly.\n\
5182 By default, caching for stack access is on."),
5183 set_stack_cache_enabled_p,
5184 show_stack_cache_enabled_p,
5185 &setlist, &showlist);
5186
5187 add_setshow_boolean_cmd ("may-write-registers", class_support,
5188 &may_write_registers_1, _("\
5189 Set permission to write into registers."), _("\
5190 Show permission to write into registers."), _("\
5191 When this permission is on, GDB may write into the target's registers.\n\
5192 Otherwise, any sort of write attempt will result in an error."),
5193 set_target_permissions, NULL,
5194 &setlist, &showlist);
5195
5196 add_setshow_boolean_cmd ("may-write-memory", class_support,
5197 &may_write_memory_1, _("\
5198 Set permission to write into target memory."), _("\
5199 Show permission to write into target memory."), _("\
5200 When this permission is on, GDB may write into the target's memory.\n\
5201 Otherwise, any sort of write attempt will result in an error."),
5202 set_write_memory_permission, NULL,
5203 &setlist, &showlist);
5204
5205 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5206 &may_insert_breakpoints_1, _("\
5207 Set permission to insert breakpoints in the target."), _("\
5208 Show permission to insert breakpoints in the target."), _("\
5209 When this permission is on, GDB may insert breakpoints in the program.\n\
5210 Otherwise, any sort of insertion attempt will result in an error."),
5211 set_target_permissions, NULL,
5212 &setlist, &showlist);
5213
5214 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5215 &may_insert_tracepoints_1, _("\
5216 Set permission to insert tracepoints in the target."), _("\
5217 Show permission to insert tracepoints in the target."), _("\
5218 When this permission is on, GDB may insert tracepoints in the program.\n\
5219 Otherwise, any sort of insertion attempt will result in an error."),
5220 set_target_permissions, NULL,
5221 &setlist, &showlist);
5222
5223 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5224 &may_insert_fast_tracepoints_1, _("\
5225 Set permission to insert fast tracepoints in the target."), _("\
5226 Show permission to insert fast tracepoints in the target."), _("\
5227 When this permission is on, GDB may insert fast tracepoints.\n\
5228 Otherwise, any sort of insertion attempt will result in an error."),
5229 set_target_permissions, NULL,
5230 &setlist, &showlist);
5231
5232 add_setshow_boolean_cmd ("may-interrupt", class_support,
5233 &may_stop_1, _("\
5234 Set permission to interrupt or signal the target."), _("\
5235 Show permission to interrupt or signal the target."), _("\
5236 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5237 Otherwise, any attempt to interrupt or stop will be ignored."),
5238 set_target_permissions, NULL,
5239 &setlist, &showlist);
5240
5241
5242 target_dcache = dcache_init ();
5243 }