1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (const char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
85 ptid_t ptid);
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct regcache *);
94
95 static void debug_to_files_info (struct target_ops *);
96
97 static int debug_to_insert_breakpoint (struct gdbarch *,
98 struct bp_target_info *);
99
100 static int debug_to_remove_breakpoint (struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_can_use_hw_breakpoint (int, int, int);
104
105 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
106 struct bp_target_info *);
107
108 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
112 struct expression *);
113
114 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
115 struct expression *);
116
117 static int debug_to_stopped_by_watchpoint (void);
118
119 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
120
121 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
122 CORE_ADDR, CORE_ADDR, int);
123
124 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
125
126 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
127 struct expression *);
128
129 static void debug_to_terminal_init (void);
130
131 static void debug_to_terminal_inferior (void);
132
133 static void debug_to_terminal_ours_for_output (void);
134
135 static void debug_to_terminal_save_ours (void);
136
137 static void debug_to_terminal_ours (void);
138
139 static void debug_to_load (char *, int);
140
141 static int debug_to_can_run (void);
142
143 static void debug_to_stop (ptid_t);
144
145 /* Pointer to array of target architecture structures; the number
146 of entries currently in use; and the allocated size of the
147 array. */
148 struct target_ops **target_structs;
149 unsigned target_struct_size;
150 unsigned target_struct_allocsize;
151 #define DEFAULT_ALLOCSIZE 10
152
153 /* The initial current target, so that there is always a semi-valid
154 current target. */
155
156 static struct target_ops dummy_target;
157
158 /* Top of target stack. */
159
160 static struct target_ops *target_stack;
161
162 /* The target structure we are currently using to talk to a process
163 or file or whatever "inferior" we have. */
164
165 struct target_ops current_target;
166
167 /* Command list for target. */
168
169 static struct cmd_list_element *targetlist = NULL;
170
171 /* Nonzero if we should trust readonly sections from the
172 executable when reading memory. */
173
174 static int trust_readonly = 0;
175
176 /* Nonzero if we should show true memory content including
177 memory breakpoints inserted by GDB. */
178
179 static int show_memory_breakpoints = 0;
180
181 /* These globals control whether GDB attempts to perform these
182 operations; they are useful for targets that need to prevent
183 inadvertent disruption, such as in non-stop mode. */
184
185 int may_write_registers = 1;
186
187 int may_write_memory = 1;
188
189 int may_insert_breakpoints = 1;
190
191 int may_insert_tracepoints = 1;
192
193 int may_insert_fast_tracepoints = 1;
194
195 int may_stop = 1;
196
197 /* Nonzero if we want to see trace output of target-level operations. */
198
199 static unsigned int targetdebug = 0;
200 static void
201 show_targetdebug (struct ui_file *file, int from_tty,
202 struct cmd_list_element *c, const char *value)
203 {
204 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
205 }
206
207 static void setup_target_debug (void);
208
209 /* The "set stack-cache" option sets this. */
210 static int stack_cache_enabled_p_1 = 1;
211 /* And set_stack_cache_enabled_p updates this.
212 The reason for the separation is so that we don't flush the cache for
213 on->on transitions. */
214 static int stack_cache_enabled_p = 1;
215
216 /* This is called *after* the stack-cache setting has been changed.
217 Flush the cache for off->on and on->off transitions.
218 (For on->off transitions the flush is not strictly needed;
219 we do it only for cleanliness.) */
220
221 static void
222 set_stack_cache_enabled_p (char *args, int from_tty,
223 struct cmd_list_element *c)
224 {
225 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
226 target_dcache_invalidate ();
227
228 stack_cache_enabled_p = stack_cache_enabled_p_1;
229 }
230
231 static void
232 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
233 struct cmd_list_element *c, const char *value)
234 {
235 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
236 }
237
238 /* Cache of memory operations, to speed up remote access. */
239 static DCACHE *target_dcache;
240
241 /* Invalidate the target dcache. */
242
243 void
244 target_dcache_invalidate (void)
245 {
246 dcache_invalidate (target_dcache);
247 }
248
249 /* The user just typed 'target' without the name of a target. */
250
251 static void
252 target_command (char *arg, int from_tty)
253 {
254 fputs_filtered ("Argument required (target name). Try `help target'\n",
255 gdb_stdout);
256 }
257
258 /* Default target_has_* methods for process_stratum targets. */
259
260 int
261 default_child_has_all_memory (struct target_ops *ops)
262 {
263 /* If no inferior selected, then we can't read memory here. */
264 if (ptid_equal (inferior_ptid, null_ptid))
265 return 0;
266
267 return 1;
268 }
269
270 int
271 default_child_has_memory (struct target_ops *ops)
272 {
273 /* If no inferior selected, then we can't read memory here. */
274 if (ptid_equal (inferior_ptid, null_ptid))
275 return 0;
276
277 return 1;
278 }
279
280 int
281 default_child_has_stack (struct target_ops *ops)
282 {
283 /* If no inferior selected, there's no stack. */
284 if (ptid_equal (inferior_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290 int
291 default_child_has_registers (struct target_ops *ops)
292 {
293 /* Can't read registers from no inferior. */
294 if (ptid_equal (inferior_ptid, null_ptid))
295 return 0;
296
297 return 1;
298 }
299
300 int
301 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
302 {
303 /* If there's no thread selected, then we can't make it run through
304 hoops. */
305 if (ptid_equal (the_ptid, null_ptid))
306 return 0;
307
308 return 1;
309 }
310
311
312 int
313 target_has_all_memory_1 (void)
314 {
315 struct target_ops *t;
316
317 for (t = current_target.beneath; t != NULL; t = t->beneath)
318 if (t->to_has_all_memory (t))
319 return 1;
320
321 return 0;
322 }
323
324 int
325 target_has_memory_1 (void)
326 {
327 struct target_ops *t;
328
329 for (t = current_target.beneath; t != NULL; t = t->beneath)
330 if (t->to_has_memory (t))
331 return 1;
332
333 return 0;
334 }
335
336 int
337 target_has_stack_1 (void)
338 {
339 struct target_ops *t;
340
341 for (t = current_target.beneath; t != NULL; t = t->beneath)
342 if (t->to_has_stack (t))
343 return 1;
344
345 return 0;
346 }
347
348 int
349 target_has_registers_1 (void)
350 {
351 struct target_ops *t;
352
353 for (t = current_target.beneath; t != NULL; t = t->beneath)
354 if (t->to_has_registers (t))
355 return 1;
356
357 return 0;
358 }
359
360 int
361 target_has_execution_1 (ptid_t the_ptid)
362 {
363 struct target_ops *t;
364
365 for (t = current_target.beneath; t != NULL; t = t->beneath)
366 if (t->to_has_execution (t, the_ptid))
367 return 1;
368
369 return 0;
370 }
371
372 int
373 target_has_execution_current (void)
374 {
375 return target_has_execution_1 (inferior_ptid);
376 }
377
378 /* Complete initialization of T. This ensures that various fields in
379 T are set, if needed by the target implementation. */
380
381 void
382 complete_target_initialization (struct target_ops *t)
383 {
384 /* Provide default values for all "must have" methods. */
385 if (t->to_xfer_partial == NULL)
386 t->to_xfer_partial = default_xfer_partial;
387
388 if (t->to_has_all_memory == NULL)
389 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
390
391 if (t->to_has_memory == NULL)
392 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
393
394 if (t->to_has_stack == NULL)
395 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
396
397 if (t->to_has_registers == NULL)
398 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
399
400 if (t->to_has_execution == NULL)
401 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
402 }
403
404 /* Add possible target architecture T to the list and add a new
405 command 'target T->to_shortname'. Set COMPLETER as the command's
406 completer if not NULL. */
407
408 void
409 add_target_with_completer (struct target_ops *t,
410 completer_ftype *completer)
411 {
412 struct cmd_list_element *c;
413
414 complete_target_initialization (t);
415
416 if (!target_structs)
417 {
418 target_struct_allocsize = DEFAULT_ALLOCSIZE;
419 target_structs = (struct target_ops **) xmalloc
420 (target_struct_allocsize * sizeof (*target_structs));
421 }
422 if (target_struct_size >= target_struct_allocsize)
423 {
424 target_struct_allocsize *= 2;
425 target_structs = (struct target_ops **)
426 xrealloc ((char *) target_structs,
427 target_struct_allocsize * sizeof (*target_structs));
428 }
429 target_structs[target_struct_size++] = t;
430
431 if (targetlist == NULL)
432 add_prefix_cmd ("target", class_run, target_command, _("\
433 Connect to a target machine or process.\n\
434 The first argument is the type or protocol of the target machine.\n\
435 Remaining arguments are interpreted by the target protocol. For more\n\
436 information on the arguments for a particular protocol, type\n\
437 `help target ' followed by the protocol name."),
438 &targetlist, "target ", 0, &cmdlist);
439 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
440 &targetlist);
441 if (completer != NULL)
442 set_cmd_completer (c, completer);
443 }
444
445 /* Add a possible target architecture to the list. */
446
447 void
448 add_target (struct target_ops *t)
449 {
450 add_target_with_completer (t, NULL);
451 }
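/* Usage sketch (illustrative only -- "my_ops", "my_open" and
   init_my_target are invented names, not part of this file):

     static struct target_ops my_ops;

     static void
     init_my_target (void)
     {
       my_ops.to_shortname = "my-target";
       my_ops.to_longname = "Illustrative target backend";
       my_ops.to_doc = "Example target; arguments are ignored.";
       my_ops.to_open = my_open;
       my_ops.to_stratum = process_stratum;
       my_ops.to_magic = OPS_MAGIC;
       add_target (&my_ops);
     }

   add_target runs complete_target_initialization first, so a backend
   only needs to fill in the methods it actually implements;
   to_xfer_partial and the to_has_* hooks get safe defaults.  */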
452
453 /* See target.h. */
454
455 void
456 add_deprecated_target_alias (struct target_ops *t, char *alias)
457 {
458 struct cmd_list_element *c;
459 char *alt;
460
461 /* If we use add_alias_cmd here, we do not get the deprecated warning;
462 see PR cli/15104. */
463 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
464 alt = xstrprintf ("target %s", t->to_shortname);
465 deprecate_cmd (c, alt);
466 }
467
468 /* Stub functions */
469
470 void
471 target_ignore (void)
472 {
473 }
474
475 void
476 target_kill (void)
477 {
478 struct target_ops *t;
479
480 for (t = current_target.beneath; t != NULL; t = t->beneath)
481 if (t->to_kill != NULL)
482 {
483 if (targetdebug)
484 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
485
486 t->to_kill (t);
487 return;
488 }
489
490 noprocess ();
491 }
492
493 void
494 target_load (char *arg, int from_tty)
495 {
496 target_dcache_invalidate ();
497 (*current_target.to_load) (arg, from_tty);
498 }
499
500 void
501 target_create_inferior (char *exec_file, char *args,
502 char **env, int from_tty)
503 {
504 struct target_ops *t;
505
506 for (t = current_target.beneath; t != NULL; t = t->beneath)
507 {
508 if (t->to_create_inferior != NULL)
509 {
510 t->to_create_inferior (t, exec_file, args, env, from_tty);
511 if (targetdebug)
512 fprintf_unfiltered (gdb_stdlog,
513 "target_create_inferior (%s, %s, xxx, %d)\n",
514 exec_file, args, from_tty);
515 return;
516 }
517 }
518
519 internal_error (__FILE__, __LINE__,
520 _("could not find a target to create inferior"));
521 }
522
523 void
524 target_terminal_inferior (void)
525 {
526 /* A background resume (``run&'') should leave GDB in control of the
527 terminal. Use target_can_async_p, not target_is_async_p, since at
528 this point the target is not async yet. However, if sync_execution
529 is not set, we know it will become async prior to resume. */
530 if (target_can_async_p () && !sync_execution)
531 return;
532
533 /* If GDB is resuming the inferior in the foreground, install
534 inferior's terminal modes. */
535 (*current_target.to_terminal_inferior) ();
536 }
537
538 static int
539 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
540 struct target_ops *t)
541 {
542 errno = EIO; /* Can't read/write this location. */
543 return 0; /* No bytes handled. */
544 }
545
546 static void
547 tcomplain (void)
548 {
549 error (_("You can't do that when your target is `%s'"),
550 current_target.to_shortname);
551 }
552
553 void
554 noprocess (void)
555 {
556 error (_("You can't do that without a process to debug."));
557 }
558
559 static void
560 default_terminal_info (const char *args, int from_tty)
561 {
562 printf_unfiltered (_("No saved terminal information.\n"));
563 }
564
565 /* A default implementation for the to_get_ada_task_ptid target method.
566
567 This function builds the PTID by using both LWP and TID as part of
568 the PTID lwp and tid elements. The pid used is the pid of the
569 inferior_ptid. */
570
571 static ptid_t
572 default_get_ada_task_ptid (long lwp, long tid)
573 {
574 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
575 }
576
577 static enum exec_direction_kind
578 default_execution_direction (void)
579 {
580 if (!target_can_execute_reverse)
581 return EXEC_FORWARD;
582 else if (!target_can_async_p ())
583 return EXEC_FORWARD;
584 else
585 gdb_assert_not_reached ("\
586 to_execution_direction must be implemented for reverse async");
587 }
588
589 /* Go through the target stack from top to bottom, copying over zero
590 entries in current_target, then filling in still empty entries. In
591 effect, we are doing class inheritance through the pushed target
592 vectors.
593
594 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
595 is currently implemented, is that it discards any knowledge of
596 which target an inherited method originally belonged to.
597 Consequently, new target methods should instead explicitly and
598 locally search the target stack for the target that can handle the
599 request. */
600
601 static void
602 update_current_target (void)
603 {
604 struct target_ops *t;
605
606 /* First, reset current's contents. */
607 memset (&current_target, 0, sizeof (current_target));
608
609 #define INHERIT(FIELD, TARGET) \
610 if (!current_target.FIELD) \
611 current_target.FIELD = (TARGET)->FIELD
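/* For illustration, INHERIT (to_files_info, t) expands to

     if (!current_target.to_files_info)
       current_target.to_files_info = (t)->to_files_info;

   so each field is taken from the topmost target on the stack that
   provides it; once set, deeper targets cannot override it.  */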
612
613 for (t = target_stack; t; t = t->beneath)
614 {
615 INHERIT (to_shortname, t);
616 INHERIT (to_longname, t);
617 INHERIT (to_doc, t);
618 /* Do not inherit to_open. */
619 /* Do not inherit to_close. */
620 /* Do not inherit to_attach. */
621 INHERIT (to_post_attach, t);
622 INHERIT (to_attach_no_wait, t);
623 /* Do not inherit to_detach. */
624 /* Do not inherit to_disconnect. */
625 /* Do not inherit to_resume. */
626 /* Do not inherit to_wait. */
627 /* Do not inherit to_fetch_registers. */
628 /* Do not inherit to_store_registers. */
629 INHERIT (to_prepare_to_store, t);
630 INHERIT (deprecated_xfer_memory, t);
631 INHERIT (to_files_info, t);
632 INHERIT (to_insert_breakpoint, t);
633 INHERIT (to_remove_breakpoint, t);
634 INHERIT (to_can_use_hw_breakpoint, t);
635 INHERIT (to_insert_hw_breakpoint, t);
636 INHERIT (to_remove_hw_breakpoint, t);
637 /* Do not inherit to_ranged_break_num_registers. */
638 INHERIT (to_insert_watchpoint, t);
639 INHERIT (to_remove_watchpoint, t);
640 /* Do not inherit to_insert_mask_watchpoint. */
641 /* Do not inherit to_remove_mask_watchpoint. */
642 INHERIT (to_stopped_data_address, t);
643 INHERIT (to_have_steppable_watchpoint, t);
644 INHERIT (to_have_continuable_watchpoint, t);
645 INHERIT (to_stopped_by_watchpoint, t);
646 INHERIT (to_watchpoint_addr_within_range, t);
647 INHERIT (to_region_ok_for_hw_watchpoint, t);
648 INHERIT (to_can_accel_watchpoint_condition, t);
649 /* Do not inherit to_masked_watch_num_registers. */
650 INHERIT (to_terminal_init, t);
651 INHERIT (to_terminal_inferior, t);
652 INHERIT (to_terminal_ours_for_output, t);
653 INHERIT (to_terminal_ours, t);
654 INHERIT (to_terminal_save_ours, t);
655 INHERIT (to_terminal_info, t);
656 /* Do not inherit to_kill. */
657 INHERIT (to_load, t);
658 /* Do not inherit to_create_inferior. */
659 INHERIT (to_post_startup_inferior, t);
660 INHERIT (to_insert_fork_catchpoint, t);
661 INHERIT (to_remove_fork_catchpoint, t);
662 INHERIT (to_insert_vfork_catchpoint, t);
663 INHERIT (to_remove_vfork_catchpoint, t);
664 /* Do not inherit to_follow_fork. */
665 INHERIT (to_insert_exec_catchpoint, t);
666 INHERIT (to_remove_exec_catchpoint, t);
667 INHERIT (to_set_syscall_catchpoint, t);
668 INHERIT (to_has_exited, t);
669 /* Do not inherit to_mourn_inferior. */
670 INHERIT (to_can_run, t);
671 /* Do not inherit to_pass_signals. */
672 /* Do not inherit to_program_signals. */
673 /* Do not inherit to_thread_alive. */
674 /* Do not inherit to_find_new_threads. */
675 /* Do not inherit to_pid_to_str. */
676 INHERIT (to_extra_thread_info, t);
677 INHERIT (to_thread_name, t);
678 INHERIT (to_stop, t);
679 /* Do not inherit to_xfer_partial. */
680 INHERIT (to_rcmd, t);
681 INHERIT (to_pid_to_exec_file, t);
682 INHERIT (to_log_command, t);
683 INHERIT (to_stratum, t);
684 /* Do not inherit to_has_all_memory. */
685 /* Do not inherit to_has_memory. */
686 /* Do not inherit to_has_stack. */
687 /* Do not inherit to_has_registers. */
688 /* Do not inherit to_has_execution. */
689 INHERIT (to_has_thread_control, t);
690 INHERIT (to_can_async_p, t);
691 INHERIT (to_is_async_p, t);
692 INHERIT (to_async, t);
693 INHERIT (to_find_memory_regions, t);
694 INHERIT (to_make_corefile_notes, t);
695 INHERIT (to_get_bookmark, t);
696 INHERIT (to_goto_bookmark, t);
697 /* Do not inherit to_get_thread_local_address. */
698 INHERIT (to_can_execute_reverse, t);
699 INHERIT (to_execution_direction, t);
700 INHERIT (to_thread_architecture, t);
701 /* Do not inherit to_read_description. */
702 INHERIT (to_get_ada_task_ptid, t);
703 /* Do not inherit to_search_memory. */
704 INHERIT (to_supports_multi_process, t);
705 INHERIT (to_supports_enable_disable_tracepoint, t);
706 INHERIT (to_supports_string_tracing, t);
707 INHERIT (to_trace_init, t);
708 INHERIT (to_download_tracepoint, t);
709 INHERIT (to_can_download_tracepoint, t);
710 INHERIT (to_download_trace_state_variable, t);
711 INHERIT (to_enable_tracepoint, t);
712 INHERIT (to_disable_tracepoint, t);
713 INHERIT (to_trace_set_readonly_regions, t);
714 INHERIT (to_trace_start, t);
715 INHERIT (to_get_trace_status, t);
716 INHERIT (to_get_tracepoint_status, t);
717 INHERIT (to_trace_stop, t);
718 INHERIT (to_trace_find, t);
719 INHERIT (to_get_trace_state_variable_value, t);
720 INHERIT (to_save_trace_data, t);
721 INHERIT (to_upload_tracepoints, t);
722 INHERIT (to_upload_trace_state_variables, t);
723 INHERIT (to_get_raw_trace_data, t);
724 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
725 INHERIT (to_set_disconnected_tracing, t);
726 INHERIT (to_set_circular_trace_buffer, t);
727 INHERIT (to_set_trace_buffer_size, t);
728 INHERIT (to_set_trace_notes, t);
729 INHERIT (to_get_tib_address, t);
730 INHERIT (to_set_permissions, t);
731 INHERIT (to_static_tracepoint_marker_at, t);
732 INHERIT (to_static_tracepoint_markers_by_strid, t);
733 INHERIT (to_traceframe_info, t);
734 INHERIT (to_use_agent, t);
735 INHERIT (to_can_use_agent, t);
736 INHERIT (to_augmented_libraries_svr4_read, t);
737 INHERIT (to_magic, t);
738 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
739 INHERIT (to_can_run_breakpoint_commands, t);
740 /* Do not inherit to_memory_map. */
741 /* Do not inherit to_flash_erase. */
742 /* Do not inherit to_flash_done. */
743 }
744 #undef INHERIT
745
746 /* Clean up a target struct so it no longer has any zero pointers in
747 it. Some entries are defaulted to a method that prints an error,
748 others are hard-wired to a standard recursive default. */
749
750 #define de_fault(field, value) \
751 if (!current_target.field) \
752 current_target.field = value
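/* For illustration, de_fault (to_can_run, return_zero) below expands to

     if (!current_target.to_can_run)
       current_target.to_can_run = return_zero;

   The casts seen in most other entries only coerce the generic stubs
   (return_zero, return_one, return_minus_one, tcomplain, target_ignore,
   noprocess) to the exact function-pointer type of the field.  */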
753
754 de_fault (to_open,
755 (void (*) (char *, int))
756 tcomplain);
757 de_fault (to_close,
758 (void (*) (void))
759 target_ignore);
760 de_fault (to_post_attach,
761 (void (*) (int))
762 target_ignore);
763 de_fault (to_prepare_to_store,
764 (void (*) (struct regcache *))
765 noprocess);
766 de_fault (deprecated_xfer_memory,
767 (int (*) (CORE_ADDR, gdb_byte *, int, int,
768 struct mem_attrib *, struct target_ops *))
769 nomemory);
770 de_fault (to_files_info,
771 (void (*) (struct target_ops *))
772 target_ignore);
773 de_fault (to_insert_breakpoint,
774 memory_insert_breakpoint);
775 de_fault (to_remove_breakpoint,
776 memory_remove_breakpoint);
777 de_fault (to_can_use_hw_breakpoint,
778 (int (*) (int, int, int))
779 return_zero);
780 de_fault (to_insert_hw_breakpoint,
781 (int (*) (struct gdbarch *, struct bp_target_info *))
782 return_minus_one);
783 de_fault (to_remove_hw_breakpoint,
784 (int (*) (struct gdbarch *, struct bp_target_info *))
785 return_minus_one);
786 de_fault (to_insert_watchpoint,
787 (int (*) (CORE_ADDR, int, int, struct expression *))
788 return_minus_one);
789 de_fault (to_remove_watchpoint,
790 (int (*) (CORE_ADDR, int, int, struct expression *))
791 return_minus_one);
792 de_fault (to_stopped_by_watchpoint,
793 (int (*) (void))
794 return_zero);
795 de_fault (to_stopped_data_address,
796 (int (*) (struct target_ops *, CORE_ADDR *))
797 return_zero);
798 de_fault (to_watchpoint_addr_within_range,
799 default_watchpoint_addr_within_range);
800 de_fault (to_region_ok_for_hw_watchpoint,
801 default_region_ok_for_hw_watchpoint);
802 de_fault (to_can_accel_watchpoint_condition,
803 (int (*) (CORE_ADDR, int, int, struct expression *))
804 return_zero);
805 de_fault (to_terminal_init,
806 (void (*) (void))
807 target_ignore);
808 de_fault (to_terminal_inferior,
809 (void (*) (void))
810 target_ignore);
811 de_fault (to_terminal_ours_for_output,
812 (void (*) (void))
813 target_ignore);
814 de_fault (to_terminal_ours,
815 (void (*) (void))
816 target_ignore);
817 de_fault (to_terminal_save_ours,
818 (void (*) (void))
819 target_ignore);
820 de_fault (to_terminal_info,
821 default_terminal_info);
822 de_fault (to_load,
823 (void (*) (char *, int))
824 tcomplain);
825 de_fault (to_post_startup_inferior,
826 (void (*) (ptid_t))
827 target_ignore);
828 de_fault (to_insert_fork_catchpoint,
829 (int (*) (int))
830 return_one);
831 de_fault (to_remove_fork_catchpoint,
832 (int (*) (int))
833 return_one);
834 de_fault (to_insert_vfork_catchpoint,
835 (int (*) (int))
836 return_one);
837 de_fault (to_remove_vfork_catchpoint,
838 (int (*) (int))
839 return_one);
840 de_fault (to_insert_exec_catchpoint,
841 (int (*) (int))
842 return_one);
843 de_fault (to_remove_exec_catchpoint,
844 (int (*) (int))
845 return_one);
846 de_fault (to_set_syscall_catchpoint,
847 (int (*) (int, int, int, int, int *))
848 return_one);
849 de_fault (to_has_exited,
850 (int (*) (int, int, int *))
851 return_zero);
852 de_fault (to_can_run,
853 return_zero);
854 de_fault (to_extra_thread_info,
855 (char *(*) (struct thread_info *))
856 return_zero);
857 de_fault (to_thread_name,
858 (char *(*) (struct thread_info *))
859 return_zero);
860 de_fault (to_stop,
861 (void (*) (ptid_t))
862 target_ignore);
863 current_target.to_xfer_partial = current_xfer_partial;
864 de_fault (to_rcmd,
865 (void (*) (char *, struct ui_file *))
866 tcomplain);
867 de_fault (to_pid_to_exec_file,
868 (char *(*) (int))
869 return_zero);
870 de_fault (to_async,
871 (void (*) (void (*) (enum inferior_event_type, void*), void*))
872 tcomplain);
873 de_fault (to_thread_architecture,
874 default_thread_architecture);
875 current_target.to_read_description = NULL;
876 de_fault (to_get_ada_task_ptid,
877 (ptid_t (*) (long, long))
878 default_get_ada_task_ptid);
879 de_fault (to_supports_multi_process,
880 (int (*) (void))
881 return_zero);
882 de_fault (to_supports_enable_disable_tracepoint,
883 (int (*) (void))
884 return_zero);
885 de_fault (to_supports_string_tracing,
886 (int (*) (void))
887 return_zero);
888 de_fault (to_trace_init,
889 (void (*) (void))
890 tcomplain);
891 de_fault (to_download_tracepoint,
892 (void (*) (struct bp_location *))
893 tcomplain);
894 de_fault (to_can_download_tracepoint,
895 (int (*) (void))
896 return_zero);
897 de_fault (to_download_trace_state_variable,
898 (void (*) (struct trace_state_variable *))
899 tcomplain);
900 de_fault (to_enable_tracepoint,
901 (void (*) (struct bp_location *))
902 tcomplain);
903 de_fault (to_disable_tracepoint,
904 (void (*) (struct bp_location *))
905 tcomplain);
906 de_fault (to_trace_set_readonly_regions,
907 (void (*) (void))
908 tcomplain);
909 de_fault (to_trace_start,
910 (void (*) (void))
911 tcomplain);
912 de_fault (to_get_trace_status,
913 (int (*) (struct trace_status *))
914 return_minus_one);
915 de_fault (to_get_tracepoint_status,
916 (void (*) (struct breakpoint *, struct uploaded_tp *))
917 tcomplain);
918 de_fault (to_trace_stop,
919 (void (*) (void))
920 tcomplain);
921 de_fault (to_trace_find,
922 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
923 return_minus_one);
924 de_fault (to_get_trace_state_variable_value,
925 (int (*) (int, LONGEST *))
926 return_zero);
927 de_fault (to_save_trace_data,
928 (int (*) (const char *))
929 tcomplain);
930 de_fault (to_upload_tracepoints,
931 (int (*) (struct uploaded_tp **))
932 return_zero);
933 de_fault (to_upload_trace_state_variables,
934 (int (*) (struct uploaded_tsv **))
935 return_zero);
936 de_fault (to_get_raw_trace_data,
937 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
938 tcomplain);
939 de_fault (to_get_min_fast_tracepoint_insn_len,
940 (int (*) (void))
941 return_minus_one);
942 de_fault (to_set_disconnected_tracing,
943 (void (*) (int))
944 target_ignore);
945 de_fault (to_set_circular_trace_buffer,
946 (void (*) (int))
947 target_ignore);
948 de_fault (to_set_trace_buffer_size,
949 (void (*) (LONGEST))
950 target_ignore);
951 de_fault (to_set_trace_notes,
952 (int (*) (const char *, const char *, const char *))
953 return_zero);
954 de_fault (to_get_tib_address,
955 (int (*) (ptid_t, CORE_ADDR *))
956 tcomplain);
957 de_fault (to_set_permissions,
958 (void (*) (void))
959 target_ignore);
960 de_fault (to_static_tracepoint_marker_at,
961 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
962 return_zero);
963 de_fault (to_static_tracepoint_markers_by_strid,
964 (VEC(static_tracepoint_marker_p) * (*) (const char *))
965 tcomplain);
966 de_fault (to_traceframe_info,
967 (struct traceframe_info * (*) (void))
968 return_zero);
969 de_fault (to_supports_evaluation_of_breakpoint_conditions,
970 (int (*) (void))
971 return_zero);
972 de_fault (to_can_run_breakpoint_commands,
973 (int (*) (void))
974 return_zero);
975 de_fault (to_use_agent,
976 (int (*) (int))
977 tcomplain);
978 de_fault (to_can_use_agent,
979 (int (*) (void))
980 return_zero);
981 de_fault (to_augmented_libraries_svr4_read,
982 (int (*) (void))
983 return_zero);
984 de_fault (to_execution_direction, default_execution_direction);
985
986 #undef de_fault
987
988 /* Finally, position the target-stack beneath the squashed
989 "current_target". That way code looking for a non-inherited
990 target method can quickly and simply find it. */
991 current_target.beneath = target_stack;
992
993 if (targetdebug)
994 setup_target_debug ();
995 }
996
997 /* Push a new target type into the stack of the existing target accessors,
998 possibly superseding some of the existing accessors.
999
1000 Rather than allow an empty stack, we always have the dummy target at
1001 the bottom stratum, so we can call the function vectors without
1002 checking them. */
1003
1004 void
1005 push_target (struct target_ops *t)
1006 {
1007 struct target_ops **cur;
1008
1009 /* Check magic number. If wrong, it probably means someone changed
1010 the struct definition, but not all the places that initialize one. */
1011 if (t->to_magic != OPS_MAGIC)
1012 {
1013 fprintf_unfiltered (gdb_stderr,
1014 "Magic number of %s target struct wrong\n",
1015 t->to_shortname);
1016 internal_error (__FILE__, __LINE__,
1017 _("failed internal consistency check"));
1018 }
1019
1020 /* Find the proper stratum to install this target in. */
1021 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1022 {
1023 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1024 break;
1025 }
1026
1027 /* If there are already targets at this stratum, remove them. */
1028 /* FIXME: cagney/2003-10-15: I think this should be popping all
1029 targets to CUR, and not just those at this stratum level. */
1030 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1031 {
1032 /* There's already something at this stratum level. Close it,
1033 and un-hook it from the stack. */
1034 struct target_ops *tmp = (*cur);
1035
1036 (*cur) = (*cur)->beneath;
1037 tmp->beneath = NULL;
1038 target_close (tmp);
1039 }
1040
1041 /* We have removed all targets in our stratum; now add the new one. */
1042 t->beneath = (*cur);
1043 (*cur) = t;
1044
1045 update_current_target ();
1046 }
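/* For example (a sketch of the usual configuration): with an
   executable loaded and a live inferior, the stack is, top to bottom,

     a process_stratum target   (native, remote, core, ...)
     a file_stratum target      (the exec target)
     the dummy_stratum target   (permanent bottom entry)

   Pushing another process_stratum target first closes and replaces
   the one already installed at that stratum, per the loop above.  */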
1047
1048 /* Remove a target_ops vector from the stack, wherever it may be.
1049 Return how many times it was removed (0 or 1). */
1050
1051 int
1052 unpush_target (struct target_ops *t)
1053 {
1054 struct target_ops **cur;
1055 struct target_ops *tmp;
1056
1057 if (t->to_stratum == dummy_stratum)
1058 internal_error (__FILE__, __LINE__,
1059 _("Attempt to unpush the dummy target"));
1060
1061 /* Look for the specified target. Note that we assume that a target
1062 can only occur once in the target stack. */
1063
1064 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1065 {
1066 if ((*cur) == t)
1067 break;
1068 }
1069
1070 /* If we don't find target_ops, quit. Only open targets should be
1071 closed. */
1072 if ((*cur) == NULL)
1073 return 0;
1074
1075 /* Unchain the target. */
1076 tmp = (*cur);
1077 (*cur) = (*cur)->beneath;
1078 tmp->beneath = NULL;
1079
1080 update_current_target ();
1081
1082 /* Finally close the target. Note we do this after unchaining, so
1083 any target method calls from within the target_close
1084 implementation don't end up in T anymore. */
1085 target_close (t);
1086
1087 return 1;
1088 }
1089
1090 void
1091 pop_all_targets_above (enum strata above_stratum)
1092 {
1093 while ((int) (current_target.to_stratum) > (int) above_stratum)
1094 {
1095 if (!unpush_target (target_stack))
1096 {
1097 fprintf_unfiltered (gdb_stderr,
1098 "pop_all_targets couldn't find target %s\n",
1099 target_stack->to_shortname);
1100 internal_error (__FILE__, __LINE__,
1101 _("failed internal consistency check"));
1102 break;
1103 }
1104 }
1105 }
1106
1107 void
1108 pop_all_targets (void)
1109 {
1110 pop_all_targets_above (dummy_stratum);
1111 }
1112
1113 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1114
1115 int
1116 target_is_pushed (struct target_ops *t)
1117 {
1118 struct target_ops **cur;
1119
1120 /* Check magic number. If wrong, it probably means someone changed
1121 the struct definition, but not all the places that initialize one. */
1122 if (t->to_magic != OPS_MAGIC)
1123 {
1124 fprintf_unfiltered (gdb_stderr,
1125 "Magic number of %s target struct wrong\n",
1126 t->to_shortname);
1127 internal_error (__FILE__, __LINE__,
1128 _("failed internal consistency check"));
1129 }
1130
1131 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1132 if (*cur == t)
1133 return 1;
1134
1135 return 0;
1136 }
1137
1138 /* Using the objfile specified in OBJFILE, find the address for the
1139 current thread's thread-local storage with offset OFFSET. */
1140 CORE_ADDR
1141 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1142 {
1143 volatile CORE_ADDR addr = 0;
1144 struct target_ops *target;
1145
1146 for (target = current_target.beneath;
1147 target != NULL;
1148 target = target->beneath)
1149 {
1150 if (target->to_get_thread_local_address != NULL)
1151 break;
1152 }
1153
1154 if (target != NULL
1155 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1156 {
1157 ptid_t ptid = inferior_ptid;
1158 volatile struct gdb_exception ex;
1159
1160 TRY_CATCH (ex, RETURN_MASK_ALL)
1161 {
1162 CORE_ADDR lm_addr;
1163
1164 /* Fetch the load module address for this objfile. */
1165 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1166 objfile);
1167 /* If it's 0, throw the appropriate exception. */
1168 if (lm_addr == 0)
1169 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1170 _("TLS load module not found"));
1171
1172 addr = target->to_get_thread_local_address (target, ptid,
1173 lm_addr, offset);
1174 }
1175 /* If an error occurred, print TLS related messages here. Otherwise,
1176 throw the error to some higher catcher. */
1177 if (ex.reason < 0)
1178 {
1179 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1180
1181 switch (ex.error)
1182 {
1183 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1184 error (_("Cannot find thread-local variables "
1185 "in this thread library."));
1186 break;
1187 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1188 if (objfile_is_library)
1189 error (_("Cannot find shared library `%s' in dynamic"
1190 " linker's load module list"), objfile->name);
1191 else
1192 error (_("Cannot find executable file `%s' in dynamic"
1193 " linker's load module list"), objfile->name);
1194 break;
1195 case TLS_NOT_ALLOCATED_YET_ERROR:
1196 if (objfile_is_library)
1197 error (_("The inferior has not yet allocated storage for"
1198 " thread-local variables in\n"
1199 "the shared library `%s'\n"
1200 "for %s"),
1201 objfile->name, target_pid_to_str (ptid));
1202 else
1203 error (_("The inferior has not yet allocated storage for"
1204 " thread-local variables in\n"
1205 "the executable `%s'\n"
1206 "for %s"),
1207 objfile->name, target_pid_to_str (ptid));
1208 break;
1209 case TLS_GENERIC_ERROR:
1210 if (objfile_is_library)
1211 error (_("Cannot find thread-local storage for %s, "
1212 "shared library %s:\n%s"),
1213 target_pid_to_str (ptid),
1214 objfile->name, ex.message);
1215 else
1216 error (_("Cannot find thread-local storage for %s, "
1217 "executable file %s:\n%s"),
1218 target_pid_to_str (ptid),
1219 objfile->name, ex.message);
1220 break;
1221 default:
1222 throw_exception (ex);
1223 break;
1224 }
1225 }
1226 }
1227 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1228 TLS is an ABI-specific thing. But we don't do that yet. */
1229 else
1230 error (_("Cannot find thread-local variables on this target"));
1231
1232 return addr;
1233 }
1234
1235 const char *
1236 target_xfer_error_to_string (enum target_xfer_error err)
1237 {
1238 #define CASE(X) case X: return #X
1239 switch (err)
1240 {
1241 CASE(TARGET_XFER_E_IO);
1242 CASE(TARGET_XFER_E_UNAVAILABLE);
1243 default:
1244 return "<unknown>";
1245 }
1246 #undef CASE
1247 }
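/* Example (sketch): turning a negative transfer result RES, as
   returned by the to_xfer_partial machinery, into a diagnostic:

     if (res < 0)
       warning (_("transfer failed: %s"),
                target_xfer_error_to_string ((enum target_xfer_error) res));
*/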
1248
1249
1250 #undef MIN
1251 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1252
1253 /* target_read_string -- read a null terminated string, up to LEN bytes,
1254 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1255 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1256 is responsible for freeing it. Return the number of bytes successfully
1257 read. */
1258
1259 int
1260 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1261 {
1262 int tlen, offset, i;
1263 gdb_byte buf[4];
1264 int errcode = 0;
1265 char *buffer;
1266 int buffer_allocated;
1267 char *bufptr;
1268 unsigned int nbytes_read = 0;
1269
1270 gdb_assert (string);
1271
1272 /* Small for testing. */
1273 buffer_allocated = 4;
1274 buffer = xmalloc (buffer_allocated);
1275 bufptr = buffer;
1276
1277 while (len > 0)
1278 {
1279 tlen = MIN (len, 4 - (memaddr & 3));
1280 offset = memaddr & 3;
1281
1282 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1283 if (errcode != 0)
1284 {
1285 /* The transfer request might have crossed the boundary to an
1286 unallocated region of memory. Retry the transfer, requesting
1287 a single byte. */
1288 tlen = 1;
1289 offset = 0;
1290 errcode = target_read_memory (memaddr, buf, 1);
1291 if (errcode != 0)
1292 goto done;
1293 }
1294
1295 if (bufptr - buffer + tlen > buffer_allocated)
1296 {
1297 unsigned int bytes;
1298
1299 bytes = bufptr - buffer;
1300 buffer_allocated *= 2;
1301 buffer = xrealloc (buffer, buffer_allocated);
1302 bufptr = buffer + bytes;
1303 }
1304
1305 for (i = 0; i < tlen; i++)
1306 {
1307 *bufptr++ = buf[i + offset];
1308 if (buf[i + offset] == '\000')
1309 {
1310 nbytes_read += i + 1;
1311 goto done;
1312 }
1313 }
1314
1315 memaddr += tlen;
1316 len -= tlen;
1317 nbytes_read += tlen;
1318 }
1319 done:
1320 *string = buffer;
1321 if (errnop != NULL)
1322 *errnop = errcode;
1323 return nbytes_read;
1324 }
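/* Call pattern (sketch; ADDR and the 200-byte limit are arbitrary):

     char *str;
     int err;

     target_read_string (addr, &str, 200, &err);
     if (err == 0)
       printf_filtered ("%s\n", str);
     xfree (str);

   Note the caller owns and must free *STRING even when an error
   code is returned.  */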
1325
1326 struct target_section_table *
1327 target_get_section_table (struct target_ops *target)
1328 {
1329 struct target_ops *t;
1330
1331 if (targetdebug)
1332 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1333
1334 for (t = target; t != NULL; t = t->beneath)
1335 if (t->to_get_section_table != NULL)
1336 return (*t->to_get_section_table) (t);
1337
1338 return NULL;
1339 }
1340
1341 /* Find a section containing ADDR. */
1342
1343 struct target_section *
1344 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1345 {
1346 struct target_section_table *table = target_get_section_table (target);
1347 struct target_section *secp;
1348
1349 if (table == NULL)
1350 return NULL;
1351
1352 for (secp = table->sections; secp < table->sections_end; secp++)
1353 {
1354 if (addr >= secp->addr && addr < secp->endaddr)
1355 return secp;
1356 }
1357 return NULL;
1358 }
1359
1360 /* Read memory from the live target, even if currently inspecting a
1361 traceframe. The return is the same as that of target_read. */
1362
1363 static LONGEST
1364 target_read_live_memory (enum target_object object,
1365 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1366 {
1367 LONGEST ret;
1368 struct cleanup *cleanup;
1369
1370 /* Switch momentarily out of tfind mode so as to access live memory.
1371 Note that this must not clear global state, such as the frame
1372 cache, which must still remain valid for the previous traceframe.
1373 We may be _building_ the frame cache at this point. */
1374 cleanup = make_cleanup_restore_traceframe_number ();
1375 set_traceframe_number (-1);
1376
1377 ret = target_read (current_target.beneath, object, NULL,
1378 myaddr, memaddr, len);
1379
1380 do_cleanups (cleanup);
1381 return ret;
1382 }
1383
1384 /* Using the set of read-only target sections of OPS, read live
1385 read-only memory. Note that the actual reads start from the
1386 top-most target again.
1387
1388 For interface/parameters/return description see target.h,
1389 to_xfer_partial. */
1390
1391 static LONGEST
1392 memory_xfer_live_readonly_partial (struct target_ops *ops,
1393 enum target_object object,
1394 gdb_byte *readbuf, ULONGEST memaddr,
1395 LONGEST len)
1396 {
1397 struct target_section *secp;
1398 struct target_section_table *table;
1399
1400 secp = target_section_by_addr (ops, memaddr);
1401 if (secp != NULL
1402 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1403 secp->the_bfd_section)
1404 & SEC_READONLY))
1405 {
1406 struct target_section *p;
1407 ULONGEST memend = memaddr + len;
1408
1409 table = target_get_section_table (ops);
1410
1411 for (p = table->sections; p < table->sections_end; p++)
1412 {
1413 if (memaddr >= p->addr)
1414 {
1415 if (memend <= p->endaddr)
1416 {
1417 /* Entire transfer is within this section. */
1418 return target_read_live_memory (object, memaddr,
1419 readbuf, len);
1420 }
1421 else if (memaddr >= p->endaddr)
1422 {
1423 /* This section ends before the transfer starts. */
1424 continue;
1425 }
1426 else
1427 {
1428 /* This section overlaps the transfer; only read the part within it. */
1429 len = p->endaddr - memaddr;
1430 return target_read_live_memory (object, memaddr,
1431 readbuf, len);
1432 }
1433 }
1434 }
1435 }
1436
1437 return 0;
1438 }
1439
1440 /* Perform a partial memory transfer.
1441 For docs see target.h, to_xfer_partial. */
1442
1443 static LONGEST
1444 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1445 void *readbuf, const void *writebuf, ULONGEST memaddr,
1446 LONGEST len)
1447 {
1448 LONGEST res;
1449 int reg_len;
1450 struct mem_region *region;
1451 struct inferior *inf;
1452
1453 /* For accesses to unmapped overlay sections, read directly from
1454 files. Must do this first, as MEMADDR may need adjustment. */
1455 if (readbuf != NULL && overlay_debugging)
1456 {
1457 struct obj_section *section = find_pc_overlay (memaddr);
1458
1459 if (pc_in_unmapped_range (memaddr, section))
1460 {
1461 struct target_section_table *table
1462 = target_get_section_table (ops);
1463 const char *section_name = section->the_bfd_section->name;
1464
1465 memaddr = overlay_mapped_address (memaddr, section);
1466 return section_table_xfer_memory_partial (readbuf, writebuf,
1467 memaddr, len,
1468 table->sections,
1469 table->sections_end,
1470 section_name);
1471 }
1472 }
1473
1474 /* Try the executable files, if "trust-readonly-sections" is set. */
1475 if (readbuf != NULL && trust_readonly)
1476 {
1477 struct target_section *secp;
1478 struct target_section_table *table;
1479
1480 secp = target_section_by_addr (ops, memaddr);
1481 if (secp != NULL
1482 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1483 secp->the_bfd_section)
1484 & SEC_READONLY))
1485 {
1486 table = target_get_section_table (ops);
1487 return section_table_xfer_memory_partial (readbuf, writebuf,
1488 memaddr, len,
1489 table->sections,
1490 table->sections_end,
1491 NULL);
1492 }
1493 }
1494
1495 /* If reading unavailable memory in the context of traceframes, and
1496 this address falls within a read-only section, fall back to
1497 reading from live memory. */
1498 if (readbuf != NULL && get_traceframe_number () != -1)
1499 {
1500 VEC(mem_range_s) *available;
1501
1502 /* If we fail to get the set of available memory, then the
1503 target does not support querying traceframe info, and so we
1504 attempt reading from the traceframe anyway (assuming the
1505 target implements the old QTro packet then). */
1506 if (traceframe_available_memory (&available, memaddr, len))
1507 {
1508 struct cleanup *old_chain;
1509
1510 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1511
1512 if (VEC_empty (mem_range_s, available)
1513 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1514 {
1515 /* Don't read into the traceframe's available
1516 memory. */
1517 if (!VEC_empty (mem_range_s, available))
1518 {
1519 LONGEST oldlen = len;
1520
1521 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1522 gdb_assert (len <= oldlen);
1523 }
1524
1525 do_cleanups (old_chain);
1526
1527 /* This goes through the topmost target again. */
1528 res = memory_xfer_live_readonly_partial (ops, object,
1529 readbuf, memaddr, len);
1530 if (res > 0)
1531 return res;
1532
1533 /* No use trying further; we know some memory starting
1534 at MEMADDR isn't available. */
1535 return TARGET_XFER_E_UNAVAILABLE;
1536 }
1537
1538 /* Don't try to read more than is available, in
1539 case the target implements the deprecated QTro packet to
1540 cater for older GDBs (the target's knowledge of read-only
1541 sections may be outdated by now). */
1542 len = VEC_index (mem_range_s, available, 0)->length;
1543
1544 do_cleanups (old_chain);
1545 }
1546 }
1547
1548 /* Try GDB's internal data cache. */
1549 region = lookup_mem_region (memaddr);
1550 /* region->hi == 0 means there's no upper bound. */
1551 if (memaddr + len < region->hi || region->hi == 0)
1552 reg_len = len;
1553 else
1554 reg_len = region->hi - memaddr;
1555
1556 switch (region->attrib.mode)
1557 {
1558 case MEM_RO:
1559 if (writebuf != NULL)
1560 return -1;
1561 break;
1562
1563 case MEM_WO:
1564 if (readbuf != NULL)
1565 return -1;
1566 break;
1567
1568 case MEM_FLASH:
1569 /* We only support writing to flash during "load" for now. */
1570 if (writebuf != NULL)
1571 error (_("Writing to flash memory forbidden in this context"));
1572 break;
1573
1574 case MEM_NONE:
1575 return -1;
1576 }
1577
1578 if (!ptid_equal (inferior_ptid, null_ptid))
1579 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1580 else
1581 inf = NULL;
1582
1583 if (inf != NULL
1584 /* The dcache reads whole cache lines; that doesn't play well
1585 with reading from a trace buffer, because reading outside of
1586 the collected memory range fails. */
1587 && get_traceframe_number () == -1
1588 && (region->attrib.cache
1589 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1590 {
1591 if (readbuf != NULL)
1592 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1593 reg_len, 0);
1594 else
1595 /* FIXME drow/2006-08-09: If we're going to preserve const
1596 correctness dcache_xfer_memory should take readbuf and
1597 writebuf. */
1598 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1599 (void *) writebuf,
1600 reg_len, 1);
1601 if (res <= 0)
1602 return -1;
1603 else
1604 return res;
1605 }
1606
1607 /* If none of those methods found the memory we wanted, fall back
1608 to a target partial transfer. Normally a single call to
1609 to_xfer_partial is enough; if it doesn't recognize an object
1610 it will call the to_xfer_partial of the next target down.
1611 But for memory this won't do. Memory is the only target
1612 object which can be read from more than one valid target.
1613 A core file, for instance, could have some of memory but
1614 delegate other bits to the target below it. So, we must
1615 manually try all targets. */
1616
1617 do
1618 {
1619 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1620 readbuf, writebuf, memaddr, reg_len);
1621 if (res > 0)
1622 break;
1623
1624 /* We want to continue past core files to executables, but not
1625 past a running target's memory. */
1626 if (ops->to_has_all_memory (ops))
1627 break;
1628
1629 ops = ops->beneath;
1630 }
1631 while (ops != NULL);
1632
1633 /* If we are writing to the stack, make sure the cache gets updated
1634 no matter what; even if the write is not tagged as a stack write,
1635 we still need to update the cache. */
1636
1637 if (res > 0
1638 && inf != NULL
1639 && writebuf != NULL
1640 && !region->attrib.cache
1641 && stack_cache_enabled_p
1642 && object != TARGET_OBJECT_STACK_MEMORY)
1643 {
1644 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1645 }
1646
1647 /* If we still haven't got anything, return the last error. We
1648 give up. */
1649 return res;
1650 }
1651
1652 /* Perform a partial memory transfer. For docs see target.h,
1653 to_xfer_partial. */
1654
1655 static LONGEST
1656 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1657 void *readbuf, const void *writebuf, ULONGEST memaddr,
1658 LONGEST len)
1659 {
1660 int res;
1661
1662 /* Zero length requests are ok and require no work. */
1663 if (len == 0)
1664 return 0;
1665
1666 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1667 breakpoint insns, thus hiding from higher layers whether
1668 there are software breakpoints inserted in the code stream. */
1669 if (readbuf != NULL)
1670 {
1671 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1672
1673 if (res > 0 && !show_memory_breakpoints)
1674 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1675 }
1676 else
1677 {
1678 void *buf;
1679 struct cleanup *old_chain;
1680
1681 buf = xmalloc (len);
1682 old_chain = make_cleanup (xfree, buf);
1683 memcpy (buf, writebuf, len);
1684
1685 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1686 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1687
1688 do_cleanups (old_chain);
1689 }
1690
1691 return res;
1692 }
1693
1694 static void
1695 restore_show_memory_breakpoints (void *arg)
1696 {
1697 show_memory_breakpoints = (uintptr_t) arg;
1698 }
1699
1700 struct cleanup *
1701 make_show_memory_breakpoints_cleanup (int show)
1702 {
1703 int current = show_memory_breakpoints;
1704
1705 show_memory_breakpoints = show;
1706 return make_cleanup (restore_show_memory_breakpoints,
1707 (void *) (uintptr_t) current);
1708 }
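/* Typical use (sketch):

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     ... memory reads issued here see the breakpoint instructions
         rather than the shadowed original contents ...

     do_cleanups (old_chain);

   do_cleanups restores the previous show_memory_breakpoints value.  */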
1709
1710 /* For docs see target.h, to_xfer_partial. */
1711
1712 LONGEST
1713 target_xfer_partial (struct target_ops *ops,
1714 enum target_object object, const char *annex,
1715 void *readbuf, const void *writebuf,
1716 ULONGEST offset, LONGEST len)
1717 {
1718 LONGEST retval;
1719
1720 gdb_assert (ops->to_xfer_partial != NULL);
1721
1722 if (writebuf && !may_write_memory)
1723 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1724 core_addr_to_string_nz (offset), plongest (len));
1725
1726 /* If this is a memory transfer, let the memory-specific code
1727 have a look at it instead. Memory transfers are more
1728 complicated. */
1729 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1730 retval = memory_xfer_partial (ops, object, readbuf,
1731 writebuf, offset, len);
1732 else
1733 {
1734 enum target_object raw_object = object;
1735
1736 /* If this is a raw memory transfer, request the normal
1737 memory object from other layers. */
1738 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1739 raw_object = TARGET_OBJECT_MEMORY;
1740
1741 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1742 writebuf, offset, len);
1743 }
1744
1745 if (targetdebug)
1746 {
1747 const unsigned char *myaddr = NULL;
1748
1749 fprintf_unfiltered (gdb_stdlog,
1750 "%s:target_xfer_partial "
1751 "(%d, %s, %s, %s, %s, %s) = %s",
1752 ops->to_shortname,
1753 (int) object,
1754 (annex ? annex : "(null)"),
1755 host_address_to_string (readbuf),
1756 host_address_to_string (writebuf),
1757 core_addr_to_string_nz (offset),
1758 plongest (len), plongest (retval));
1759
1760 if (readbuf)
1761 myaddr = readbuf;
1762 if (writebuf)
1763 myaddr = writebuf;
1764 if (retval > 0 && myaddr != NULL)
1765 {
1766 int i;
1767
1768 fputs_unfiltered (", bytes =", gdb_stdlog);
1769 for (i = 0; i < retval; i++)
1770 {
1771 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1772 {
1773 if (targetdebug < 2 && i > 0)
1774 {
1775 fprintf_unfiltered (gdb_stdlog, " ...");
1776 break;
1777 }
1778 fprintf_unfiltered (gdb_stdlog, "\n");
1779 }
1780
1781 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1782 }
1783 }
1784
1785 fputc_unfiltered ('\n', gdb_stdlog);
1786 }
1787 return retval;
1788 }
1789
1790 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1791 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1792 if any error occurs.
1793
1794 If an error occurs, no guarantee is made about the contents of the data at
1795 MYADDR. In particular, the caller should not depend upon partial reads
1796 filling the buffer with good data. There is no way for the caller to know
1797 how much good data might have been transferred anyway. Callers that can
1798 deal with partial reads should call target_read (which will retry until
1799 it makes no progress, and then return how much was transferred). */
1800
1801 int
1802 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1803 {
1804 /* Dispatch to the topmost target, not the flattened current_target.
1805 Memory accesses check target->to_has_(all_)memory, and the
1806 flattened target doesn't inherit those. */
1807 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1808 myaddr, memaddr, len) == len)
1809 return 0;
1810 else
1811 return EIO;
1812 }
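/* Caller sketch (GDBARCH and ADDR stand for whatever the caller has
   in scope):

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot read memory at %s"), paddress (gdbarch, addr));

   Callers that can make use of partial data should call target_read
   directly instead, as noted above.  */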
1813
1814 /* Like target_read_memory, but specify explicitly that this is a read from
1815 the target's stack. This may trigger different cache behavior. */
1816
1817 int
1818 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1819 {
1820 /* Dispatch to the topmost target, not the flattened current_target.
1821 Memory accesses check target->to_has_(all_)memory, and the
1822 flattened target doesn't inherit those. */
1823
1824 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1825 myaddr, memaddr, len) == len)
1826 return 0;
1827 else
1828 return EIO;
1829 }
1830
1831 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1832 Returns either 0 for success or an errno value if any error occurs.
1833 If an error occurs, no guarantee is made about how much data got written.
1834 Callers that can deal with partial writes should call target_write. */
1835
1836 int
1837 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1838 {
1839 /* Dispatch to the topmost target, not the flattened current_target.
1840 Memory accesses check target->to_has_(all_)memory, and the
1841 flattened target doesn't inherit those. */
1842 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1843 myaddr, memaddr, len) == len)
1844 return 0;
1845 else
1846 return EIO;
1847 }
1848
1849 /* Write LEN bytes from MYADDR to target raw memory at address
1850 MEMADDR. Returns either 0 for success or an errno value if any
1851 error occurs. If an error occurs, no guarantee is made about how
1852 much data got written. Callers that can deal with partial writes
1853 should call target_write. */
1854
1855 int
1856 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1857 {
1858 /* Dispatch to the topmost target, not the flattened current_target.
1859 Memory accesses check target->to_has_(all_)memory, and the
1860 flattened target doesn't inherit those. */
1861 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1862 myaddr, memaddr, len) == len)
1863 return 0;
1864 else
1865 return EIO;
1866 }
1867
1868 /* Fetch the target's memory map. */
1869
1870 VEC(mem_region_s) *
1871 target_memory_map (void)
1872 {
1873 VEC(mem_region_s) *result;
1874 struct mem_region *last_one, *this_one;
1875 int ix;
1876 struct target_ops *t;
1877
1878 if (targetdebug)
1879 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1880
1881 for (t = current_target.beneath; t != NULL; t = t->beneath)
1882 if (t->to_memory_map != NULL)
1883 break;
1884
1885 if (t == NULL)
1886 return NULL;
1887
1888 result = t->to_memory_map (t);
1889 if (result == NULL)
1890 return NULL;
1891
1892 qsort (VEC_address (mem_region_s, result),
1893 VEC_length (mem_region_s, result),
1894 sizeof (struct mem_region), mem_region_cmp);
1895
1896 /* Check that regions do not overlap. Simultaneously assign
1897 a numbering for the "mem" commands to use to refer to
1898 each region. */
1899 last_one = NULL;
1900 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1901 {
1902 this_one->number = ix;
1903
1904 if (last_one && last_one->hi > this_one->lo)
1905 {
1906 warning (_("Overlapping regions in memory map: ignoring"));
1907 VEC_free (mem_region_s, result);
1908 return NULL;
1909 }
1910 last_one = this_one;
1911 }
1912
1913 return result;
1914 }
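
/* A short sketch of consuming the result above: the returned vector is
   sorted and numbered, so callers usually walk it with VEC_iterate and
   release it with VEC_free when done.  The printing below is hypothetical;
   the block is under "#if 0" as it is only an illustration.  */
#if 0
static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_unfiltered ("region %d: [%s, %s)\n", r->number,
		       hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}
#endif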
1915
1916 void
1917 target_flash_erase (ULONGEST address, LONGEST length)
1918 {
1919 struct target_ops *t;
1920
1921 for (t = current_target.beneath; t != NULL; t = t->beneath)
1922 if (t->to_flash_erase != NULL)
1923 {
1924 if (targetdebug)
1925 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1926 hex_string (address), phex (length, 0));
1927 t->to_flash_erase (t, address, length);
1928 return;
1929 }
1930
1931 tcomplain ();
1932 }
1933
1934 void
1935 target_flash_done (void)
1936 {
1937 struct target_ops *t;
1938
1939 for (t = current_target.beneath; t != NULL; t = t->beneath)
1940 if (t->to_flash_done != NULL)
1941 {
1942 if (targetdebug)
1943 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1944 t->to_flash_done (t);
1945 return;
1946 }
1947
1948 tcomplain ();
1949 }
1950
1951 static void
1952 show_trust_readonly (struct ui_file *file, int from_tty,
1953 struct cmd_list_element *c, const char *value)
1954 {
1955 fprintf_filtered (file,
1956 _("Mode for reading from readonly sections is %s.\n"),
1957 value);
1958 }
1959
1960 /* More generic transfers. */
1961
1962 static LONGEST
1963 default_xfer_partial (struct target_ops *ops, enum target_object object,
1964 const char *annex, gdb_byte *readbuf,
1965 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1966 {
1967 if (object == TARGET_OBJECT_MEMORY
1968 && ops->deprecated_xfer_memory != NULL)
1969 /* If available, fall back to the target's
1970 "deprecated_xfer_memory" method. */
1971 {
1972 int xfered = -1;
1973
1974 errno = 0;
1975 if (writebuf != NULL)
1976 {
1977 void *buffer = xmalloc (len);
1978 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1979
1980 memcpy (buffer, writebuf, len);
1981 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1982 1/*write*/, NULL, ops);
1983 do_cleanups (cleanup);
1984 }
1985 if (readbuf != NULL)
1986 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1987 0/*read*/, NULL, ops);
1988 if (xfered > 0)
1989 return xfered;
1990 else if (xfered == 0 && errno == 0)
1991 /* "deprecated_xfer_memory" uses 0, cross checked against
1992 ERRNO as one indication of an error. */
1993 return 0;
1994 else
1995 return -1;
1996 }
1997 else if (ops->beneath != NULL)
1998 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1999 readbuf, writebuf, offset, len);
2000 else
2001 return -1;
2002 }
2003
2004 /* The xfer_partial handler for the topmost target. Unlike the default,
2005 it does not need to handle memory specially; it just passes all
2006 requests down the stack. */
2007
2008 static LONGEST
2009 current_xfer_partial (struct target_ops *ops, enum target_object object,
2010 const char *annex, gdb_byte *readbuf,
2011 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2012 {
2013 if (ops->beneath != NULL)
2014 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2015 readbuf, writebuf, offset, len);
2016 else
2017 return -1;
2018 }
2019
2020 /* Target vector read/write partial wrapper functions. */
2021
2022 static LONGEST
2023 target_read_partial (struct target_ops *ops,
2024 enum target_object object,
2025 const char *annex, gdb_byte *buf,
2026 ULONGEST offset, LONGEST len)
2027 {
2028 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2029 }
2030
2031 static LONGEST
2032 target_write_partial (struct target_ops *ops,
2033 enum target_object object,
2034 const char *annex, const gdb_byte *buf,
2035 ULONGEST offset, LONGEST len)
2036 {
2037 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2038 }
2039
2040 /* Wrappers to perform the full transfer. */
2041
2042 /* For docs on target_read see target.h. */
2043
2044 LONGEST
2045 target_read (struct target_ops *ops,
2046 enum target_object object,
2047 const char *annex, gdb_byte *buf,
2048 ULONGEST offset, LONGEST len)
2049 {
2050 LONGEST xfered = 0;
2051
2052 while (xfered < len)
2053 {
2054 LONGEST xfer = target_read_partial (ops, object, annex,
2055 (gdb_byte *) buf + xfered,
2056 offset + xfered, len - xfered);
2057
2058 /* Call an observer, notifying them of the xfer progress? */
2059 if (xfer == 0)
2060 return xfered;
2061 if (xfer < 0)
2062 return -1;
2063 xfered += xfer;
2064 QUIT;
2065 }
2066 return len;
2067 }
2068
2069 /* Assuming that the entire [begin, end) range of memory cannot be
2070 read, try to read whatever subrange is possible to read.
2071
2072 The function returns, in RESULT, either zero or one memory block.
2073 If there's a readable subrange at the beginning, it is completely
2074 read and returned. Any further readable subrange will not be read.
2075 Otherwise, if there's a readable subrange at the end, it will be
2076 completely read and returned. Any readable subranges before it
2077 (obviously, not starting at the beginning) will be ignored. In other
2078 cases -- either no readable subrange at all, or readable subranges that
2079 are neither at the beginning nor at the end -- nothing is returned.
2080
2081 The purpose of this function is to handle a read across a boundary
2082 of accessible memory in the case when a memory map is not available.
2083 The above restrictions are fine for this case, but will give
2084 incorrect results if the memory is 'patchy'. However, supporting
2085 'patchy' memory would require trying to read every single byte,
2086 and that seems an unacceptable solution. An explicit memory map is
2087 recommended for this case -- read_memory_robust will then take
2088 care of reading multiple ranges.
2089
2090 static void
2091 read_whatever_is_readable (struct target_ops *ops,
2092 ULONGEST begin, ULONGEST end,
2093 VEC(memory_read_result_s) **result)
2094 {
2095 gdb_byte *buf = xmalloc (end - begin);
2096 ULONGEST current_begin = begin;
2097 ULONGEST current_end = end;
2098 int forward;
2099 memory_read_result_s r;
2100
2101 /* If we previously failed to read 1 byte, nothing can be done here. */
2102 if (end - begin <= 1)
2103 {
2104 xfree (buf);
2105 return;
2106 }
2107
2108 /* Check that either the first or the last byte is readable, and give up
2109 if not. This heuristic is meant to permit reading accessible memory
2110 at the boundary of an accessible region. */
2111 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2112 buf, begin, 1) == 1)
2113 {
2114 forward = 1;
2115 ++current_begin;
2116 }
2117 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2118 buf + (end-begin) - 1, end - 1, 1) == 1)
2119 {
2120 forward = 0;
2121 --current_end;
2122 }
2123 else
2124 {
2125 xfree (buf);
2126 return;
2127 }
2128
2129 /* The loop invariant is that the range [current_begin, current_end) was
2130 previously found to be not readable as a whole.
2131
2132 Note the loop condition -- if the range has only 1 byte, we can't divide
2133 it further, so there's no point in trying. */
2134 while (current_end - current_begin > 1)
2135 {
2136 ULONGEST first_half_begin, first_half_end;
2137 ULONGEST second_half_begin, second_half_end;
2138 LONGEST xfer;
2139 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2140
2141 if (forward)
2142 {
2143 first_half_begin = current_begin;
2144 first_half_end = middle;
2145 second_half_begin = middle;
2146 second_half_end = current_end;
2147 }
2148 else
2149 {
2150 first_half_begin = middle;
2151 first_half_end = current_end;
2152 second_half_begin = current_begin;
2153 second_half_end = middle;
2154 }
2155
2156 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2157 buf + (first_half_begin - begin),
2158 first_half_begin,
2159 first_half_end - first_half_begin);
2160
2161 if (xfer == first_half_end - first_half_begin)
2162 {
2163 /* This half reads fine, so the error must be in the
2164 other half. */
2165 current_begin = second_half_begin;
2166 current_end = second_half_end;
2167 }
2168 else
2169 {
2170 /* This half is not readable. Because we've tried one byte, we
2171 know some part of this half is actually readable. Go to the next
2172 iteration to divide again and try to read.
2173
2174 We don't handle the other half, because this function only tries
2175 to read a single readable subrange. */
2176 current_begin = first_half_begin;
2177 current_end = first_half_end;
2178 }
2179 }
2180
2181 if (forward)
2182 {
2183 /* The [begin, current_begin) range has been read. */
2184 r.begin = begin;
2185 r.end = current_begin;
2186 r.data = buf;
2187 }
2188 else
2189 {
2190 /* The [current_end, end) range has been read. */
2191 LONGEST rlen = end - current_end;
2192
2193 r.data = xmalloc (rlen);
2194 memcpy (r.data, buf + current_end - begin, rlen);
2195 r.begin = current_end;
2196 r.end = end;
2197 xfree (buf);
2198 }
2199 VEC_safe_push(memory_read_result_s, (*result), &r);
2200 }
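
/* The bisection above can be viewed in isolation: given a range whose
   first byte is readable but which is not readable as a whole, repeatedly
   halve the range, keeping the half that contains the failure, until the
   boundary is pinned down.  The standalone sketch below (plain C, no GDB
   types, hypothetical "readable" predicate) illustrates just that halving
   strategy, not the data transfer; it is under "#if 0" as it is only an
   illustration.  */
#if 0
typedef int (*readable_fn) (unsigned long begin, unsigned long end);

/* Return L such that [BEGIN, L) is readable and the byte at L is not,
   assuming [BEGIN, BEGIN + 1) is readable and [BEGIN, END) is not.  */
static unsigned long
largest_readable_prefix (readable_fn readable,
			 unsigned long begin, unsigned long end)
{
  unsigned long lo = begin + 1;	/* [begin, lo) is known readable.  */
  unsigned long hi = end;	/* [begin, hi) is known not readable.  */

  while (hi - lo > 1)
    {
      unsigned long mid = lo + (hi - lo) / 2;

      if (readable (begin, mid))
	lo = mid;		/* The failure is in [mid, hi).  */
      else
	hi = mid;		/* The failure is already in [lo, mid).  */
    }

  return lo;			/* [begin, lo) readable; byte at lo is not.  */
}
#endif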
2201
2202 void
2203 free_memory_read_result_vector (void *x)
2204 {
2205 VEC(memory_read_result_s) *v = x;
2206 memory_read_result_s *current;
2207 int ix;
2208
2209 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2210 {
2211 xfree (current->data);
2212 }
2213 VEC_free (memory_read_result_s, v);
2214 }
2215
2216 VEC(memory_read_result_s) *
2217 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2218 {
2219 VEC(memory_read_result_s) *result = 0;
2220
2221 LONGEST xfered = 0;
2222 while (xfered < len)
2223 {
2224 struct mem_region *region = lookup_mem_region (offset + xfered);
2225 LONGEST rlen;
2226
2227 /* If there is no explicit region, a fake one should be created. */
2228 gdb_assert (region);
2229
2230 if (region->hi == 0)
2231 rlen = len - xfered;
2232 else
2233 rlen = region->hi - offset;
2234
2235 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2236 {
2237 /* Cannot read this region. Note that we can end up here only
2238 if the region is explicitly marked inaccessible, or
2239 'inaccessible-by-default' is in effect. */
2240 xfered += rlen;
2241 }
2242 else
2243 {
2244 LONGEST to_read = min (len - xfered, rlen);
2245 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2246
2247 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2248 (gdb_byte *) buffer,
2249 offset + xfered, to_read);
2250 /* Call an observer, notifying them of the xfer progress? */
2251 if (xfer <= 0)
2252 {
2253 /* Got an error reading full chunk. See if maybe we can read
2254 some subrange. */
2255 xfree (buffer);
2256 read_whatever_is_readable (ops, offset + xfered,
2257 offset + xfered + to_read, &result);
2258 xfered += to_read;
2259 }
2260 else
2261 {
2262 struct memory_read_result r;
2263 r.data = buffer;
2264 r.begin = offset + xfered;
2265 r.end = r.begin + xfer;
2266 VEC_safe_push (memory_read_result_s, result, &r);
2267 xfered += xfer;
2268 }
2269 QUIT;
2270 }
2271 }
2272 return result;
2273 }
2274
2275
2276 /* An alternative to target_write with progress callbacks. */
2277
2278 LONGEST
2279 target_write_with_progress (struct target_ops *ops,
2280 enum target_object object,
2281 const char *annex, const gdb_byte *buf,
2282 ULONGEST offset, LONGEST len,
2283 void (*progress) (ULONGEST, void *), void *baton)
2284 {
2285 LONGEST xfered = 0;
2286
2287 /* Give the progress callback a chance to set up. */
2288 if (progress)
2289 (*progress) (0, baton);
2290
2291 while (xfered < len)
2292 {
2293 LONGEST xfer = target_write_partial (ops, object, annex,
2294 (gdb_byte *) buf + xfered,
2295 offset + xfered, len - xfered);
2296
2297 if (xfer == 0)
2298 return xfered;
2299 if (xfer < 0)
2300 return -1;
2301
2302 if (progress)
2303 (*progress) (xfer, baton);
2304
2305 xfered += xfer;
2306 QUIT;
2307 }
2308 return len;
2309 }
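
/* A usage sketch for the progress interface above: the callback is invoked
   once with 0 before the transfer starts and then once per partial write
   with the number of bytes just transferred.  The callback body, the total
   tracked in BATON, and the use of TARGET_OBJECT_FLASH are illustrative
   assumptions; the block is under "#if 0" as it is only an illustration.  */
#if 0
static void
example_progress (ULONGEST bytes, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes;
  printf_unfiltered ("wrote %s bytes so far\n", pulongest (*total));
}

static LONGEST
example_write_flash (struct target_ops *ops, const gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  ULONGEST total = 0;

  return target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL,
				     buf, offset, len,
				     example_progress, &total);
}
#endif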
2310
2311 /* For docs on target_write see target.h. */
2312
2313 LONGEST
2314 target_write (struct target_ops *ops,
2315 enum target_object object,
2316 const char *annex, const gdb_byte *buf,
2317 ULONGEST offset, LONGEST len)
2318 {
2319 return target_write_with_progress (ops, object, annex, buf, offset, len,
2320 NULL, NULL);
2321 }
2322
2323 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2324 the size of the transferred data. PADDING additional bytes are
2325 available in *BUF_P. This is a helper function for
2326 target_read_alloc; see the declaration of that function for more
2327 information. */
2328
2329 static LONGEST
2330 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2331 const char *annex, gdb_byte **buf_p, int padding)
2332 {
2333 size_t buf_alloc, buf_pos;
2334 gdb_byte *buf;
2335 LONGEST n;
2336
2337 /* This function does not have a length parameter; it reads the
2338 entire OBJECT. Also, it doesn't support objects fetched partly
2339 from one target and partly from another (in a different stratum,
2340 e.g. a core file and an executable). Both reasons make it
2341 unsuitable for reading memory. */
2342 gdb_assert (object != TARGET_OBJECT_MEMORY);
2343
2344 /* Start by reading up to 4K at a time. The target will throttle
2345 this number down if necessary. */
2346 buf_alloc = 4096;
2347 buf = xmalloc (buf_alloc);
2348 buf_pos = 0;
2349 while (1)
2350 {
2351 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2352 buf_pos, buf_alloc - buf_pos - padding);
2353 if (n < 0)
2354 {
2355 /* An error occurred. */
2356 xfree (buf);
2357 return -1;
2358 }
2359 else if (n == 0)
2360 {
2361 /* Read all there was. */
2362 if (buf_pos == 0)
2363 xfree (buf);
2364 else
2365 *buf_p = buf;
2366 return buf_pos;
2367 }
2368
2369 buf_pos += n;
2370
2371 /* If the buffer is filling up, expand it. */
2372 if (buf_alloc < buf_pos * 2)
2373 {
2374 buf_alloc *= 2;
2375 buf = xrealloc (buf, buf_alloc);
2376 }
2377
2378 QUIT;
2379 }
2380 }
2381
2382 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2383 the size of the transferred data. See the declaration in "target.h"
2384 for more information about the return value. */
2385
2386 LONGEST
2387 target_read_alloc (struct target_ops *ops, enum target_object object,
2388 const char *annex, gdb_byte **buf_p)
2389 {
2390 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2391 }
2392
2393 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2394 returned as a string, allocated using xmalloc. If an error occurs
2395 or the transfer is unsupported, NULL is returned. Empty objects
2396 are returned as allocated but empty strings. A warning is issued
2397 if the result contains any embedded NUL bytes. */
2398
2399 char *
2400 target_read_stralloc (struct target_ops *ops, enum target_object object,
2401 const char *annex)
2402 {
2403 gdb_byte *buffer;
2404 char *bufstr;
2405 LONGEST i, transferred;
2406
2407 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2408 bufstr = (char *) buffer;
2409
2410 if (transferred < 0)
2411 return NULL;
2412
2413 if (transferred == 0)
2414 return xstrdup ("");
2415
2416 bufstr[transferred] = 0;
2417
2418 /* Check for embedded NUL bytes; but allow trailing NULs. */
2419 for (i = strlen (bufstr); i < transferred; i++)
2420 if (bufstr[i] != 0)
2421 {
2422 warning (_("target object %d, annex %s, "
2423 "contained unexpected null characters"),
2424 (int) object, annex ? annex : "(none)");
2425 break;
2426 }
2427
2428 return bufstr;
2429 }
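
/* A short usage sketch for target_read_stralloc: the result is xmalloc'd,
   so callers typically arrange to xfree it, e.g. via a cleanup.  Reading
   the XML library list is only one example of a string-shaped object, and
   assumes the target provides one; the block is under "#if 0" as it is
   only an illustration.  */
#if 0
static void
example_show_library_list_xml (void)
{
  char *xml = target_read_stralloc (&current_target,
				    TARGET_OBJECT_LIBRARIES, NULL);

  if (xml != NULL)
    {
      struct cleanup *back_to = make_cleanup (xfree, xml);

      printf_unfiltered ("%s", xml);
      do_cleanups (back_to);
    }
}
#endif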
2430
2431 /* Memory transfer methods. */
2432
2433 void
2434 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2435 LONGEST len)
2436 {
2437 /* This method is used to read from an alternate, non-current
2438 target. This read must bypass the overlay support (as symbols
2439 don't match this target), and GDB's internal cache (wrong cache
2440 for this target). */
2441 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2442 != len)
2443 memory_error (EIO, addr);
2444 }
2445
2446 ULONGEST
2447 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2448 int len, enum bfd_endian byte_order)
2449 {
2450 gdb_byte buf[sizeof (ULONGEST)];
2451
2452 gdb_assert (len <= sizeof (buf));
2453 get_target_memory (ops, addr, buf, len);
2454 return extract_unsigned_integer (buf, len, byte_order);
2455 }
2456
2457 int
2458 target_insert_breakpoint (struct gdbarch *gdbarch,
2459 struct bp_target_info *bp_tgt)
2460 {
2461 if (!may_insert_breakpoints)
2462 {
2463 warning (_("May not insert breakpoints"));
2464 return 1;
2465 }
2466
2467 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2468 }
2469
2470 int
2471 target_remove_breakpoint (struct gdbarch *gdbarch,
2472 struct bp_target_info *bp_tgt)
2473 {
2474 /* This is kind of a weird case to handle, but the permission might
2475 have been changed after breakpoints were inserted - in which case
2476 we should just take the user literally and assume that any
2477 breakpoints should be left in place. */
2478 if (!may_insert_breakpoints)
2479 {
2480 warning (_("May not remove breakpoints"));
2481 return 1;
2482 }
2483
2484 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2485 }
2486
2487 static void
2488 target_info (char *args, int from_tty)
2489 {
2490 struct target_ops *t;
2491 int has_all_mem = 0;
2492
2493 if (symfile_objfile != NULL)
2494 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2495
2496 for (t = target_stack; t != NULL; t = t->beneath)
2497 {
2498 if (!(*t->to_has_memory) (t))
2499 continue;
2500
2501 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2502 continue;
2503 if (has_all_mem)
2504 printf_unfiltered (_("\tWhile running this, "
2505 "GDB does not access memory from...\n"));
2506 printf_unfiltered ("%s:\n", t->to_longname);
2507 (t->to_files_info) (t);
2508 has_all_mem = (*t->to_has_all_memory) (t);
2509 }
2510 }
2511
2512 /* This function is called before any new inferior is created, e.g.
2513 by running a program, attaching, or connecting to a target.
2514 It cleans up any state from previous invocations which might
2515 change between runs. This is a subset of what target_preopen
2516 resets (things which might change between targets). */
2517
2518 void
2519 target_pre_inferior (int from_tty)
2520 {
2521 /* Clear out solib state. Otherwise the solib state of the previous
2522 inferior might have survived and is entirely wrong for the new
2523 target. This has been observed on GNU/Linux using glibc 2.3. How
2524 to reproduce:
2525
2526 bash$ ./foo&
2527 [1] 4711
2528 bash$ ./foo&
2529 [1] 4712
2530 bash$ gdb ./foo
2531 [...]
2532 (gdb) attach 4711
2533 (gdb) detach
2534 (gdb) attach 4712
2535 Cannot access memory at address 0xdeadbeef
2536 */
2537
2538 /* In some OSs, the shared library list is the same/global/shared
2539 across inferiors. If code is shared between processes, so are
2540 memory regions and features. */
2541 if (!gdbarch_has_global_solist (target_gdbarch ()))
2542 {
2543 no_shared_libraries (NULL, from_tty);
2544
2545 invalidate_target_mem_regions ();
2546
2547 target_clear_description ();
2548 }
2549
2550 agent_capability_invalidate ();
2551 }
2552
2553 /* Callback for iterate_over_inferiors. Gets rid of the given
2554 inferior. */
2555
2556 static int
2557 dispose_inferior (struct inferior *inf, void *args)
2558 {
2559 struct thread_info *thread;
2560
2561 thread = any_thread_of_process (inf->pid);
2562 if (thread)
2563 {
2564 switch_to_thread (thread->ptid);
2565
2566 /* Core inferiors actually should be detached, not killed. */
2567 if (target_has_execution)
2568 target_kill ();
2569 else
2570 target_detach (NULL, 0);
2571 }
2572
2573 return 0;
2574 }
2575
2576 /* This is to be called by the open routine before it does
2577 anything. */
2578
2579 void
2580 target_preopen (int from_tty)
2581 {
2582 dont_repeat ();
2583
2584 if (have_inferiors ())
2585 {
2586 if (!from_tty
2587 || !have_live_inferiors ()
2588 || query (_("A program is being debugged already. Kill it? ")))
2589 iterate_over_inferiors (dispose_inferior, NULL);
2590 else
2591 error (_("Program not killed."));
2592 }
2593
2594 /* Calling target_kill may remove the target from the stack. But if
2595 it doesn't (which seems like a win for UDI), remove it now. */
2596 /* Leave the exec target, though. The user may be switching from a
2597 live process to a core of the same program. */
2598 pop_all_targets_above (file_stratum);
2599
2600 target_pre_inferior (from_tty);
2601 }
2602
2603 /* Detach a target after doing deferred register stores. */
2604
2605 void
2606 target_detach (char *args, int from_tty)
2607 {
2608 struct target_ops* t;
2609
2610 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2611 /* Don't remove global breakpoints here. They're removed on
2612 disconnection from the target. */
2613 ;
2614 else
2615 /* If we're in breakpoints-always-inserted mode, have to remove
2616 them before detaching. */
2617 remove_breakpoints_pid (PIDGET (inferior_ptid));
2618
2619 prepare_for_detach ();
2620
2621 for (t = current_target.beneath; t != NULL; t = t->beneath)
2622 {
2623 if (t->to_detach != NULL)
2624 {
2625 t->to_detach (t, args, from_tty);
2626 if (targetdebug)
2627 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2628 args, from_tty);
2629 return;
2630 }
2631 }
2632
2633 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2634 }
2635
2636 void
2637 target_disconnect (char *args, int from_tty)
2638 {
2639 struct target_ops *t;
2640
2641 /* If we're in breakpoints-always-inserted mode or if breakpoints
2642 are global across processes, we have to remove them before
2643 disconnecting. */
2644 remove_breakpoints ();
2645
2646 for (t = current_target.beneath; t != NULL; t = t->beneath)
2647 if (t->to_disconnect != NULL)
2648 {
2649 if (targetdebug)
2650 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2651 args, from_tty);
2652 t->to_disconnect (t, args, from_tty);
2653 return;
2654 }
2655
2656 tcomplain ();
2657 }
2658
2659 ptid_t
2660 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2661 {
2662 struct target_ops *t;
2663
2664 for (t = current_target.beneath; t != NULL; t = t->beneath)
2665 {
2666 if (t->to_wait != NULL)
2667 {
2668 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2669
2670 if (targetdebug)
2671 {
2672 char *status_string;
2673 char *options_string;
2674
2675 status_string = target_waitstatus_to_string (status);
2676 options_string = target_options_to_string (options);
2677 fprintf_unfiltered (gdb_stdlog,
2678 "target_wait (%d, status, options={%s})"
2679 " = %d, %s\n",
2680 PIDGET (ptid), options_string,
2681 PIDGET (retval), status_string);
2682 xfree (status_string);
2683 xfree (options_string);
2684 }
2685
2686 return retval;
2687 }
2688 }
2689
2690 noprocess ();
2691 }
2692
2693 char *
2694 target_pid_to_str (ptid_t ptid)
2695 {
2696 struct target_ops *t;
2697
2698 for (t = current_target.beneath; t != NULL; t = t->beneath)
2699 {
2700 if (t->to_pid_to_str != NULL)
2701 return (*t->to_pid_to_str) (t, ptid);
2702 }
2703
2704 return normal_pid_to_str (ptid);
2705 }
2706
2707 char *
2708 target_thread_name (struct thread_info *info)
2709 {
2710 struct target_ops *t;
2711
2712 for (t = current_target.beneath; t != NULL; t = t->beneath)
2713 {
2714 if (t->to_thread_name != NULL)
2715 return (*t->to_thread_name) (info);
2716 }
2717
2718 return NULL;
2719 }
2720
2721 void
2722 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2723 {
2724 struct target_ops *t;
2725
2726 target_dcache_invalidate ();
2727
2728 for (t = current_target.beneath; t != NULL; t = t->beneath)
2729 {
2730 if (t->to_resume != NULL)
2731 {
2732 t->to_resume (t, ptid, step, signal);
2733 if (targetdebug)
2734 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2735 PIDGET (ptid),
2736 step ? "step" : "continue",
2737 gdb_signal_to_name (signal));
2738
2739 registers_changed_ptid (ptid);
2740 set_executing (ptid, 1);
2741 set_running (ptid, 1);
2742 clear_inline_frame_state (ptid);
2743 return;
2744 }
2745 }
2746
2747 noprocess ();
2748 }
2749
2750 void
2751 target_pass_signals (int numsigs, unsigned char *pass_signals)
2752 {
2753 struct target_ops *t;
2754
2755 for (t = current_target.beneath; t != NULL; t = t->beneath)
2756 {
2757 if (t->to_pass_signals != NULL)
2758 {
2759 if (targetdebug)
2760 {
2761 int i;
2762
2763 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2764 numsigs);
2765
2766 for (i = 0; i < numsigs; i++)
2767 if (pass_signals[i])
2768 fprintf_unfiltered (gdb_stdlog, " %s",
2769 gdb_signal_to_name (i));
2770
2771 fprintf_unfiltered (gdb_stdlog, " })\n");
2772 }
2773
2774 (*t->to_pass_signals) (numsigs, pass_signals);
2775 return;
2776 }
2777 }
2778 }
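
/* A sketch of how the NUMSIGS/PASS_SIGNALS pair above is typically built:
   an array of flags indexed by gdb_signal number, where a non-zero entry
   means the signal is passed to the inferior without stopping.  Which
   signals to set is of course caller-specific; the block is under "#if 0"
   as it is only an illustration.  */
#if 0
static void
example_pass_sigalrm_only (void)
{
  unsigned char pass[GDB_SIGNAL_LAST];

  memset (pass, 0, sizeof (pass));
  pass[GDB_SIGNAL_ALRM] = 1;

  target_pass_signals ((int) GDB_SIGNAL_LAST, pass);
}
#endif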
2779
2780 void
2781 target_program_signals (int numsigs, unsigned char *program_signals)
2782 {
2783 struct target_ops *t;
2784
2785 for (t = current_target.beneath; t != NULL; t = t->beneath)
2786 {
2787 if (t->to_program_signals != NULL)
2788 {
2789 if (targetdebug)
2790 {
2791 int i;
2792
2793 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2794 numsigs);
2795
2796 for (i = 0; i < numsigs; i++)
2797 if (program_signals[i])
2798 fprintf_unfiltered (gdb_stdlog, " %s",
2799 gdb_signal_to_name (i));
2800
2801 fprintf_unfiltered (gdb_stdlog, " })\n");
2802 }
2803
2804 (*t->to_program_signals) (numsigs, program_signals);
2805 return;
2806 }
2807 }
2808 }
2809
2810 /* Look through the list of possible targets for a target that can
2811 follow forks. */
2812
2813 int
2814 target_follow_fork (int follow_child)
2815 {
2816 struct target_ops *t;
2817
2818 for (t = current_target.beneath; t != NULL; t = t->beneath)
2819 {
2820 if (t->to_follow_fork != NULL)
2821 {
2822 int retval = t->to_follow_fork (t, follow_child);
2823
2824 if (targetdebug)
2825 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2826 follow_child, retval);
2827 return retval;
2828 }
2829 }
2830
2831 /* Some target returned a fork event, but did not know how to follow it. */
2832 internal_error (__FILE__, __LINE__,
2833 _("could not find a target to follow fork"));
2834 }
2835
2836 void
2837 target_mourn_inferior (void)
2838 {
2839 struct target_ops *t;
2840
2841 for (t = current_target.beneath; t != NULL; t = t->beneath)
2842 {
2843 if (t->to_mourn_inferior != NULL)
2844 {
2845 t->to_mourn_inferior (t);
2846 if (targetdebug)
2847 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2848
2849 /* We no longer need to keep handles on any of the object files.
2850 Make sure to release them to avoid unnecessarily locking any
2851 of them while we're not actually debugging. */
2852 bfd_cache_close_all ();
2853
2854 return;
2855 }
2856 }
2857
2858 internal_error (__FILE__, __LINE__,
2859 _("could not find a target to follow mourn inferior"));
2860 }
2861
2862 /* Look for a target which can describe architectural features, starting
2863 from TARGET. If we find one, return its description. */
2864
2865 const struct target_desc *
2866 target_read_description (struct target_ops *target)
2867 {
2868 struct target_ops *t;
2869
2870 for (t = target; t != NULL; t = t->beneath)
2871 if (t->to_read_description != NULL)
2872 {
2873 const struct target_desc *tdesc;
2874
2875 tdesc = t->to_read_description (t);
2876 if (tdesc)
2877 return tdesc;
2878 }
2879
2880 return NULL;
2881 }
2882
2883 /* The default implementation of to_search_memory.
2884 This implements a basic search of memory, reading target memory and
2885 performing the search here (as opposed to performing the search on the
2886 target side with, for example, gdbserver). */
2887
2888 int
2889 simple_search_memory (struct target_ops *ops,
2890 CORE_ADDR start_addr, ULONGEST search_space_len,
2891 const gdb_byte *pattern, ULONGEST pattern_len,
2892 CORE_ADDR *found_addrp)
2893 {
2894 /* NOTE: also defined in find.c testcase. */
2895 #define SEARCH_CHUNK_SIZE 16000
2896 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2897 /* Buffer to hold memory contents for searching. */
2898 gdb_byte *search_buf;
2899 unsigned search_buf_size;
2900 struct cleanup *old_cleanups;
2901
2902 search_buf_size = chunk_size + pattern_len - 1;
2903
2904 /* No point in trying to allocate a buffer larger than the search space. */
2905 if (search_space_len < search_buf_size)
2906 search_buf_size = search_space_len;
2907
2908 search_buf = malloc (search_buf_size);
2909 if (search_buf == NULL)
2910 error (_("Unable to allocate memory to perform the search."));
2911 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2912
2913 /* Prime the search buffer. */
2914
2915 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2916 search_buf, start_addr, search_buf_size) != search_buf_size)
2917 {
2918 warning (_("Unable to access %s bytes of target "
2919 "memory at %s, halting search."),
2920 pulongest (search_buf_size), hex_string (start_addr));
2921 do_cleanups (old_cleanups);
2922 return -1;
2923 }
2924
2925 /* Perform the search.
2926
2927 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2928 When we've scanned N bytes we copy the trailing bytes to the start and
2929 read in another N bytes. */
2930
2931 while (search_space_len >= pattern_len)
2932 {
2933 gdb_byte *found_ptr;
2934 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2935
2936 found_ptr = memmem (search_buf, nr_search_bytes,
2937 pattern, pattern_len);
2938
2939 if (found_ptr != NULL)
2940 {
2941 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2942
2943 *found_addrp = found_addr;
2944 do_cleanups (old_cleanups);
2945 return 1;
2946 }
2947
2948 /* Not found in this chunk, skip to next chunk. */
2949
2950 /* Don't let search_space_len wrap here, it's unsigned. */
2951 if (search_space_len >= chunk_size)
2952 search_space_len -= chunk_size;
2953 else
2954 search_space_len = 0;
2955
2956 if (search_space_len >= pattern_len)
2957 {
2958 unsigned keep_len = search_buf_size - chunk_size;
2959 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2960 int nr_to_read;
2961
2962 /* Copy the trailing part of the previous iteration to the front
2963 of the buffer for the next iteration. */
2964 gdb_assert (keep_len == pattern_len - 1);
2965 memcpy (search_buf, search_buf + chunk_size, keep_len);
2966
2967 nr_to_read = min (search_space_len - keep_len, chunk_size);
2968
2969 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2970 search_buf + keep_len, read_addr,
2971 nr_to_read) != nr_to_read)
2972 {
2973 warning (_("Unable to access %s bytes of target "
2974 "memory at %s, halting search."),
2975 plongest (nr_to_read),
2976 hex_string (read_addr));
2977 do_cleanups (old_cleanups);
2978 return -1;
2979 }
2980
2981 start_addr += chunk_size;
2982 }
2983 }
2984
2985 /* Not found. */
2986
2987 do_cleanups (old_cleanups);
2988 return 0;
2989 }
2990
2991 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2992 sequence of bytes in PATTERN with length PATTERN_LEN.
2993
2994 The result is 1 if found, 0 if not found, and -1 if there was an error
2995 requiring halting of the search (e.g. memory read error).
2996 If the pattern is found the address is recorded in FOUND_ADDRP. */
2997
2998 int
2999 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3000 const gdb_byte *pattern, ULONGEST pattern_len,
3001 CORE_ADDR *found_addrp)
3002 {
3003 struct target_ops *t;
3004 int found;
3005
3006 /* We don't use INHERIT to set current_target.to_search_memory,
3007 so we have to scan the target stack and handle targetdebug
3008 ourselves. */
3009
3010 if (targetdebug)
3011 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3012 hex_string (start_addr));
3013
3014 for (t = current_target.beneath; t != NULL; t = t->beneath)
3015 if (t->to_search_memory != NULL)
3016 break;
3017
3018 if (t != NULL)
3019 {
3020 found = t->to_search_memory (t, start_addr, search_space_len,
3021 pattern, pattern_len, found_addrp);
3022 }
3023 else
3024 {
3025 /* If a special version of to_search_memory isn't available, use the
3026 simple version. */
3027 found = simple_search_memory (current_target.beneath,
3028 start_addr, search_space_len,
3029 pattern, pattern_len, found_addrp);
3030 }
3031
3032 if (targetdebug)
3033 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3034
3035 return found;
3036 }
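
/* A usage sketch for target_search_memory: search a caller-supplied address
   range for a 4-byte pattern and report the address if found.  The pattern
   and the messages are made up; the block is under "#if 0" as it is only an
   illustration.  */
#if 0
static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found;
  int rc = target_search_memory (start, space_len,
				 magic, sizeof (magic), &found);

  if (rc == 1)
    printf_unfiltered ("pattern found at %s\n", hex_string (found));
  else if (rc == 0)
    printf_unfiltered ("pattern not found\n");
  else
    printf_unfiltered ("search failed\n");
}
#endif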
3037
3038 /* Look through the currently pushed targets. If none of them will
3039 be able to restart the currently running process, issue an error
3040 message. */
3041
3042 void
3043 target_require_runnable (void)
3044 {
3045 struct target_ops *t;
3046
3047 for (t = target_stack; t != NULL; t = t->beneath)
3048 {
3049 /* If this target knows how to create a new program, then
3050 assume we will still be able to after killing the current
3051 one. Either killing and mourning will not pop T, or else
3052 find_default_run_target will find it again. */
3053 if (t->to_create_inferior != NULL)
3054 return;
3055
3056 /* Do not worry about thread_stratum targets that can not
3057 create inferiors. Assume they will be pushed again if
3058 necessary, and continue to the process_stratum. */
3059 if (t->to_stratum == thread_stratum
3060 || t->to_stratum == arch_stratum)
3061 continue;
3062
3063 error (_("The \"%s\" target does not support \"run\". "
3064 "Try \"help target\" or \"continue\"."),
3065 t->to_shortname);
3066 }
3067
3068 /* This function is only called if the target is running. In that
3069 case there should have been a process_stratum target and it
3070 should either know how to create inferiors, or not... */
3071 internal_error (__FILE__, __LINE__, _("No targets found"));
3072 }
3073
3074 /* Look through the list of possible targets for a target that can
3075 execute a run or attach command without any other data. This is
3076 used to locate the default process stratum.
3077
3078 If DO_MESG is not NULL, the result is always valid (error() is
3079 called for errors); else, return NULL on error. */
3080
3081 static struct target_ops *
3082 find_default_run_target (char *do_mesg)
3083 {
3084 struct target_ops **t;
3085 struct target_ops *runnable = NULL;
3086 int count;
3087
3088 count = 0;
3089
3090 for (t = target_structs; t < target_structs + target_struct_size;
3091 ++t)
3092 {
3093 if ((*t)->to_can_run && target_can_run (*t))
3094 {
3095 runnable = *t;
3096 ++count;
3097 }
3098 }
3099
3100 if (count != 1)
3101 {
3102 if (do_mesg)
3103 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3104 else
3105 return NULL;
3106 }
3107
3108 return runnable;
3109 }
3110
3111 void
3112 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3113 {
3114 struct target_ops *t;
3115
3116 t = find_default_run_target ("attach");
3117 (t->to_attach) (t, args, from_tty);
3118 return;
3119 }
3120
3121 void
3122 find_default_create_inferior (struct target_ops *ops,
3123 char *exec_file, char *allargs, char **env,
3124 int from_tty)
3125 {
3126 struct target_ops *t;
3127
3128 t = find_default_run_target ("run");
3129 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3130 return;
3131 }
3132
3133 static int
3134 find_default_can_async_p (void)
3135 {
3136 struct target_ops *t;
3137
3138 /* This may be called before the target is pushed on the stack;
3139 look for the default process stratum. If there's none, gdb isn't
3140 configured with a native debugger, and target remote isn't
3141 connected yet. */
3142 t = find_default_run_target (NULL);
3143 if (t && t->to_can_async_p)
3144 return (t->to_can_async_p) ();
3145 return 0;
3146 }
3147
3148 static int
3149 find_default_is_async_p (void)
3150 {
3151 struct target_ops *t;
3152
3153 /* This may be called before the target is pushed on the stack;
3154 look for the default process stratum. If there's none, gdb isn't
3155 configured with a native debugger, and target remote isn't
3156 connected yet. */
3157 t = find_default_run_target (NULL);
3158 if (t && t->to_is_async_p)
3159 return (t->to_is_async_p) ();
3160 return 0;
3161 }
3162
3163 static int
3164 find_default_supports_non_stop (void)
3165 {
3166 struct target_ops *t;
3167
3168 t = find_default_run_target (NULL);
3169 if (t && t->to_supports_non_stop)
3170 return (t->to_supports_non_stop) ();
3171 return 0;
3172 }
3173
3174 int
3175 target_supports_non_stop (void)
3176 {
3177 struct target_ops *t;
3178
3179 for (t = &current_target; t != NULL; t = t->beneath)
3180 if (t->to_supports_non_stop)
3181 return t->to_supports_non_stop ();
3182
3183 return 0;
3184 }
3185
3186 /* Implement the "info proc" command. */
3187
3188 int
3189 target_info_proc (char *args, enum info_proc_what what)
3190 {
3191 struct target_ops *t;
3192
3193 /* If we're already connected to something that can get us OS
3194 related data, use it. Otherwise, try using the native
3195 target. */
3196 if (current_target.to_stratum >= process_stratum)
3197 t = current_target.beneath;
3198 else
3199 t = find_default_run_target (NULL);
3200
3201 for (; t != NULL; t = t->beneath)
3202 {
3203 if (t->to_info_proc != NULL)
3204 {
3205 t->to_info_proc (t, args, what);
3206
3207 if (targetdebug)
3208 fprintf_unfiltered (gdb_stdlog,
3209 "target_info_proc (\"%s\", %d)\n", args, what);
3210
3211 return 1;
3212 }
3213 }
3214
3215 return 0;
3216 }
3217
3218 static int
3219 find_default_supports_disable_randomization (void)
3220 {
3221 struct target_ops *t;
3222
3223 t = find_default_run_target (NULL);
3224 if (t && t->to_supports_disable_randomization)
3225 return (t->to_supports_disable_randomization) ();
3226 return 0;
3227 }
3228
3229 int
3230 target_supports_disable_randomization (void)
3231 {
3232 struct target_ops *t;
3233
3234 for (t = &current_target; t != NULL; t = t->beneath)
3235 if (t->to_supports_disable_randomization)
3236 return t->to_supports_disable_randomization ();
3237
3238 return 0;
3239 }
3240
3241 char *
3242 target_get_osdata (const char *type)
3243 {
3244 struct target_ops *t;
3245
3246 /* If we're already connected to something that can get us OS
3247 related data, use it. Otherwise, try using the native
3248 target. */
3249 if (current_target.to_stratum >= process_stratum)
3250 t = current_target.beneath;
3251 else
3252 t = find_default_run_target ("get OS data");
3253
3254 if (!t)
3255 return NULL;
3256
3257 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3258 }
3259
3260 /* Determine the current address space of thread PTID. */
3261
3262 struct address_space *
3263 target_thread_address_space (ptid_t ptid)
3264 {
3265 struct address_space *aspace;
3266 struct inferior *inf;
3267 struct target_ops *t;
3268
3269 for (t = current_target.beneath; t != NULL; t = t->beneath)
3270 {
3271 if (t->to_thread_address_space != NULL)
3272 {
3273 aspace = t->to_thread_address_space (t, ptid);
3274 gdb_assert (aspace);
3275
3276 if (targetdebug)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "target_thread_address_space (%s) = %d\n",
3279 target_pid_to_str (ptid),
3280 address_space_num (aspace));
3281 return aspace;
3282 }
3283 }
3284
3285 /* Fall back to the "main" address space of the inferior. */
3286 inf = find_inferior_pid (ptid_get_pid (ptid));
3287
3288 if (inf == NULL || inf->aspace == NULL)
3289 internal_error (__FILE__, __LINE__,
3290 _("Can't determine the current "
3291 "address space of thread %s\n"),
3292 target_pid_to_str (ptid));
3293
3294 return inf->aspace;
3295 }
3296
3297
3298 /* Target file operations. */
3299
3300 static struct target_ops *
3301 default_fileio_target (void)
3302 {
3303 /* If we're already connected to something that can perform
3304 file I/O, use it. Otherwise, try using the native target. */
3305 if (current_target.to_stratum >= process_stratum)
3306 return current_target.beneath;
3307 else
3308 return find_default_run_target ("file I/O");
3309 }
3310
3311 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3312 target file descriptor, or -1 if an error occurs (and set
3313 *TARGET_ERRNO). */
3314 int
3315 target_fileio_open (const char *filename, int flags, int mode,
3316 int *target_errno)
3317 {
3318 struct target_ops *t;
3319
3320 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3321 {
3322 if (t->to_fileio_open != NULL)
3323 {
3324 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3325
3326 if (targetdebug)
3327 fprintf_unfiltered (gdb_stdlog,
3328 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3329 filename, flags, mode,
3330 fd, fd != -1 ? 0 : *target_errno);
3331 return fd;
3332 }
3333 }
3334
3335 *target_errno = FILEIO_ENOSYS;
3336 return -1;
3337 }
3338
3339 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3340 Return the number of bytes written, or -1 if an error occurs
3341 (and set *TARGET_ERRNO). */
3342 int
3343 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3344 ULONGEST offset, int *target_errno)
3345 {
3346 struct target_ops *t;
3347
3348 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3349 {
3350 if (t->to_fileio_pwrite != NULL)
3351 {
3352 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3353 target_errno);
3354
3355 if (targetdebug)
3356 fprintf_unfiltered (gdb_stdlog,
3357 "target_fileio_pwrite (%d,...,%d,%s) "
3358 "= %d (%d)\n",
3359 fd, len, pulongest (offset),
3360 ret, ret != -1 ? 0 : *target_errno);
3361 return ret;
3362 }
3363 }
3364
3365 *target_errno = FILEIO_ENOSYS;
3366 return -1;
3367 }
3368
3369 /* Read up to LEN bytes from FD on the target into READ_BUF.
3370 Return the number of bytes read, or -1 if an error occurs
3371 (and set *TARGET_ERRNO). */
3372 int
3373 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3374 ULONGEST offset, int *target_errno)
3375 {
3376 struct target_ops *t;
3377
3378 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3379 {
3380 if (t->to_fileio_pread != NULL)
3381 {
3382 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3383 target_errno);
3384
3385 if (targetdebug)
3386 fprintf_unfiltered (gdb_stdlog,
3387 "target_fileio_pread (%d,...,%d,%s) "
3388 "= %d (%d)\n",
3389 fd, len, pulongest (offset),
3390 ret, ret != -1 ? 0 : *target_errno);
3391 return ret;
3392 }
3393 }
3394
3395 *target_errno = FILEIO_ENOSYS;
3396 return -1;
3397 }
3398
3399 /* Close FD on the target. Return 0, or -1 if an error occurs
3400 (and set *TARGET_ERRNO). */
3401 int
3402 target_fileio_close (int fd, int *target_errno)
3403 {
3404 struct target_ops *t;
3405
3406 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3407 {
3408 if (t->to_fileio_close != NULL)
3409 {
3410 int ret = t->to_fileio_close (fd, target_errno);
3411
3412 if (targetdebug)
3413 fprintf_unfiltered (gdb_stdlog,
3414 "target_fileio_close (%d) = %d (%d)\n",
3415 fd, ret, ret != -1 ? 0 : *target_errno);
3416 return ret;
3417 }
3418 }
3419
3420 *target_errno = FILEIO_ENOSYS;
3421 return -1;
3422 }
3423
3424 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3425 occurs (and set *TARGET_ERRNO). */
3426 int
3427 target_fileio_unlink (const char *filename, int *target_errno)
3428 {
3429 struct target_ops *t;
3430
3431 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3432 {
3433 if (t->to_fileio_unlink != NULL)
3434 {
3435 int ret = t->to_fileio_unlink (filename, target_errno);
3436
3437 if (targetdebug)
3438 fprintf_unfiltered (gdb_stdlog,
3439 "target_fileio_unlink (%s) = %d (%d)\n",
3440 filename, ret, ret != -1 ? 0 : *target_errno);
3441 return ret;
3442 }
3443 }
3444
3445 *target_errno = FILEIO_ENOSYS;
3446 return -1;
3447 }
3448
3449 /* Read value of symbolic link FILENAME on the target. Return a
3450 null-terminated string allocated via xmalloc, or NULL if an error
3451 occurs (and set *TARGET_ERRNO). */
3452 char *
3453 target_fileio_readlink (const char *filename, int *target_errno)
3454 {
3455 struct target_ops *t;
3456
3457 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3458 {
3459 if (t->to_fileio_readlink != NULL)
3460 {
3461 char *ret = t->to_fileio_readlink (filename, target_errno);
3462
3463 if (targetdebug)
3464 fprintf_unfiltered (gdb_stdlog,
3465 "target_fileio_readlink (%s) = %s (%d)\n",
3466 filename, ret? ret : "(nil)",
3467 ret? 0 : *target_errno);
3468 return ret;
3469 }
3470 }
3471
3472 *target_errno = FILEIO_ENOSYS;
3473 return NULL;
3474 }
3475
3476 static void
3477 target_fileio_close_cleanup (void *opaque)
3478 {
3479 int fd = *(int *) opaque;
3480 int target_errno;
3481
3482 target_fileio_close (fd, &target_errno);
3483 }
3484
3485 /* Read target file FILENAME. Store the result in *BUF_P and
3486 return the size of the transferred data. PADDING additional bytes are
3487 available in *BUF_P. This is a helper function for
3488 target_fileio_read_alloc; see the declaration of that function for more
3489 information. */
3490
3491 static LONGEST
3492 target_fileio_read_alloc_1 (const char *filename,
3493 gdb_byte **buf_p, int padding)
3494 {
3495 struct cleanup *close_cleanup;
3496 size_t buf_alloc, buf_pos;
3497 gdb_byte *buf;
3498 LONGEST n;
3499 int fd;
3500 int target_errno;
3501
3502 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3503 if (fd == -1)
3504 return -1;
3505
3506 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3507
3508 /* Start by reading up to 4K at a time. The target will throttle
3509 this number down if necessary. */
3510 buf_alloc = 4096;
3511 buf = xmalloc (buf_alloc);
3512 buf_pos = 0;
3513 while (1)
3514 {
3515 n = target_fileio_pread (fd, &buf[buf_pos],
3516 buf_alloc - buf_pos - padding, buf_pos,
3517 &target_errno);
3518 if (n < 0)
3519 {
3520 /* An error occurred. */
3521 do_cleanups (close_cleanup);
3522 xfree (buf);
3523 return -1;
3524 }
3525 else if (n == 0)
3526 {
3527 /* Read all there was. */
3528 do_cleanups (close_cleanup);
3529 if (buf_pos == 0)
3530 xfree (buf);
3531 else
3532 *buf_p = buf;
3533 return buf_pos;
3534 }
3535
3536 buf_pos += n;
3537
3538 /* If the buffer is filling up, expand it. */
3539 if (buf_alloc < buf_pos * 2)
3540 {
3541 buf_alloc *= 2;
3542 buf = xrealloc (buf, buf_alloc);
3543 }
3544
3545 QUIT;
3546 }
3547 }
3548
3549 /* Read target file FILENAME. Store the result in *BUF_P and return
3550 the size of the transferred data. See the declaration in "target.h"
3551 for more information about the return value. */
3552
3553 LONGEST
3554 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3555 {
3556 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3557 }
3558
3559 /* Read target file FILENAME. The result is NUL-terminated and
3560 returned as a string, allocated using xmalloc. If an error occurs
3561 or the transfer is unsupported, NULL is returned. Empty objects
3562 are returned as allocated but empty strings. A warning is issued
3563 if the result contains any embedded NUL bytes. */
3564
3565 char *
3566 target_fileio_read_stralloc (const char *filename)
3567 {
3568 gdb_byte *buffer;
3569 char *bufstr;
3570 LONGEST i, transferred;
3571
3572 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3573 bufstr = (char *) buffer;
3574
3575 if (transferred < 0)
3576 return NULL;
3577
3578 if (transferred == 0)
3579 return xstrdup ("");
3580
3581 bufstr[transferred] = 0;
3582
3583 /* Check for embedded NUL bytes; but allow trailing NULs. */
3584 for (i = strlen (bufstr); i < transferred; i++)
3585 if (bufstr[i] != 0)
3586 {
3587 warning (_("target file %s "
3588 "contained unexpected null characters"),
3589 filename);
3590 break;
3591 }
3592
3593 return bufstr;
3594 }
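
/* A usage sketch for target_fileio_read_stralloc: fetch a text file from
   the target and print it.  The caller-supplied path is hypothetical (any
   target-side file would do); the block is under "#if 0" as it is only an
   illustration.  */
#if 0
static void
example_show_target_file (const char *path)
{
  char *text = target_fileio_read_stralloc (path);

  if (text == NULL)
    {
      printf_unfiltered ("could not read %s\n", path);
      return;
    }

  printf_unfiltered ("%s", text);
  xfree (text);
}
#endif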
3595
3596
3597 static int
3598 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3599 {
3600 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3601 }
3602
3603 static int
3604 default_watchpoint_addr_within_range (struct target_ops *target,
3605 CORE_ADDR addr,
3606 CORE_ADDR start, int length)
3607 {
3608 return addr >= start && addr < start + length;
3609 }
3610
3611 static struct gdbarch *
3612 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3613 {
3614 return target_gdbarch ();
3615 }
3616
3617 static int
3618 return_zero (void)
3619 {
3620 return 0;
3621 }
3622
3623 static int
3624 return_one (void)
3625 {
3626 return 1;
3627 }
3628
3629 static int
3630 return_minus_one (void)
3631 {
3632 return -1;
3633 }
3634
3635 /* Find the next target down the stack from the specified target. */
3638
3639 struct target_ops *
3640 find_target_beneath (struct target_ops *t)
3641 {
3642 return t->beneath;
3643 }
3644
3645 \f
3646 /* The inferior process has died. Long live the inferior! */
3647
3648 void
3649 generic_mourn_inferior (void)
3650 {
3651 ptid_t ptid;
3652
3653 ptid = inferior_ptid;
3654 inferior_ptid = null_ptid;
3655
3656 /* Mark breakpoints uninserted in case something tries to delete a
3657 breakpoint while we delete the inferior's threads (which would
3658 fail, since the inferior is long gone). */
3659 mark_breakpoints_out ();
3660
3661 if (!ptid_equal (ptid, null_ptid))
3662 {
3663 int pid = ptid_get_pid (ptid);
3664 exit_inferior (pid);
3665 }
3666
3667 /* Note this wipes step-resume breakpoints, so it needs to be done
3668 after exit_inferior, which ends up referencing the step-resume
3669 breakpoints through clear_thread_inferior_resources. */
3670 breakpoint_init_inferior (inf_exited);
3671
3672 registers_changed ();
3673
3674 reopen_exec_file ();
3675 reinit_frame_cache ();
3676
3677 if (deprecated_detach_hook)
3678 deprecated_detach_hook ();
3679 }
3680 \f
3681 /* Convert a normal process ID to a string. Returns the string in a
3682 static buffer. */
3683
3684 char *
3685 normal_pid_to_str (ptid_t ptid)
3686 {
3687 static char buf[32];
3688
3689 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3690 return buf;
3691 }
3692
3693 static char *
3694 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3695 {
3696 return normal_pid_to_str (ptid);
3697 }
3698
3699 /* Error-catcher for target_find_memory_regions. */
3700 static int
3701 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3702 {
3703 error (_("Command not implemented for this target."));
3704 return 0;
3705 }
3706
3707 /* Error-catcher for target_make_corefile_notes. */
3708 static char *
3709 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3710 {
3711 error (_("Command not implemented for this target."));
3712 return NULL;
3713 }
3714
3715 /* Error-catcher for target_get_bookmark. */
3716 static gdb_byte *
3717 dummy_get_bookmark (char *ignore1, int ignore2)
3718 {
3719 tcomplain ();
3720 return NULL;
3721 }
3722
3723 /* Error-catcher for target_goto_bookmark. */
3724 static void
3725 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3726 {
3727 tcomplain ();
3728 }
3729
3730 /* Set up the handful of non-empty slots needed by the dummy target
3731 vector. */
3732
3733 static void
3734 init_dummy_target (void)
3735 {
3736 dummy_target.to_shortname = "None";
3737 dummy_target.to_longname = "None";
3738 dummy_target.to_doc = "";
3739 dummy_target.to_attach = find_default_attach;
3740 dummy_target.to_detach =
3741 (void (*)(struct target_ops *, char *, int))target_ignore;
3742 dummy_target.to_create_inferior = find_default_create_inferior;
3743 dummy_target.to_can_async_p = find_default_can_async_p;
3744 dummy_target.to_is_async_p = find_default_is_async_p;
3745 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3746 dummy_target.to_supports_disable_randomization
3747 = find_default_supports_disable_randomization;
3748 dummy_target.to_pid_to_str = dummy_pid_to_str;
3749 dummy_target.to_stratum = dummy_stratum;
3750 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3751 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3752 dummy_target.to_get_bookmark = dummy_get_bookmark;
3753 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3754 dummy_target.to_xfer_partial = default_xfer_partial;
3755 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3756 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3757 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3758 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3759 dummy_target.to_has_execution
3760 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3761 dummy_target.to_stopped_by_watchpoint = return_zero;
3762 dummy_target.to_stopped_data_address =
3763 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3764 dummy_target.to_magic = OPS_MAGIC;
3765 }
3766 \f
3767 static void
3768 debug_to_open (char *args, int from_tty)
3769 {
3770 debug_target.to_open (args, from_tty);
3771
3772 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3773 }
3774
3775 void
3776 target_close (struct target_ops *targ)
3777 {
3778 gdb_assert (!target_is_pushed (targ));
3779
3780 if (targ->to_xclose != NULL)
3781 targ->to_xclose (targ);
3782 else if (targ->to_close != NULL)
3783 targ->to_close ();
3784
3785 if (targetdebug)
3786 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3787 }
3788
3789 void
3790 target_attach (char *args, int from_tty)
3791 {
3792 struct target_ops *t;
3793
3794 for (t = current_target.beneath; t != NULL; t = t->beneath)
3795 {
3796 if (t->to_attach != NULL)
3797 {
3798 t->to_attach (t, args, from_tty);
3799 if (targetdebug)
3800 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3801 args, from_tty);
3802 return;
3803 }
3804 }
3805
3806 internal_error (__FILE__, __LINE__,
3807 _("could not find a target to attach"));
3808 }
3809
3810 int
3811 target_thread_alive (ptid_t ptid)
3812 {
3813 struct target_ops *t;
3814
3815 for (t = current_target.beneath; t != NULL; t = t->beneath)
3816 {
3817 if (t->to_thread_alive != NULL)
3818 {
3819 int retval;
3820
3821 retval = t->to_thread_alive (t, ptid);
3822 if (targetdebug)
3823 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3824 PIDGET (ptid), retval);
3825
3826 return retval;
3827 }
3828 }
3829
3830 return 0;
3831 }
3832
3833 void
3834 target_find_new_threads (void)
3835 {
3836 struct target_ops *t;
3837
3838 for (t = current_target.beneath; t != NULL; t = t->beneath)
3839 {
3840 if (t->to_find_new_threads != NULL)
3841 {
3842 t->to_find_new_threads (t);
3843 if (targetdebug)
3844 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3845
3846 return;
3847 }
3848 }
3849 }
3850
3851 void
3852 target_stop (ptid_t ptid)
3853 {
3854 if (!may_stop)
3855 {
3856 warning (_("May not interrupt or stop the target, ignoring attempt"));
3857 return;
3858 }
3859
3860 (*current_target.to_stop) (ptid);
3861 }
3862
3863 static void
3864 debug_to_post_attach (int pid)
3865 {
3866 debug_target.to_post_attach (pid);
3867
3868 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3869 }
3870
3871 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3872 result. The incoming LIST argument is released. */
3873
3874 static char *
3875 str_comma_list_concat_elem (char *list, const char *elem)
3876 {
3877 if (list == NULL)
3878 return xstrdup (elem);
3879 else
3880 return reconcat (list, list, ", ", elem, (char *) NULL);
3881 }
3882
3883 /* Helper for target_options_to_string. If OPT is present in
3884 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3885 Returns the new resulting string. OPT is removed from
3886 TARGET_OPTIONS. */
3887
3888 static char *
3889 do_option (int *target_options, char *ret,
3890 int opt, char *opt_str)
3891 {
3892 if ((*target_options & opt) != 0)
3893 {
3894 ret = str_comma_list_concat_elem (ret, opt_str);
3895 *target_options &= ~opt;
3896 }
3897
3898 return ret;
3899 }
3900
3901 char *
3902 target_options_to_string (int target_options)
3903 {
3904 char *ret = NULL;
3905
3906 #define DO_TARG_OPTION(OPT) \
3907 ret = do_option (&target_options, ret, OPT, #OPT)
3908
3909 DO_TARG_OPTION (TARGET_WNOHANG);
3910
3911 if (target_options != 0)
3912 ret = str_comma_list_concat_elem (ret, "unknown???");
3913
3914 if (ret == NULL)
3915 ret = xstrdup ("");
3916 return ret;
3917 }
3918
3919 static void
3920 debug_print_register (const char * func,
3921 struct regcache *regcache, int regno)
3922 {
3923 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3924
3925 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3926 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3927 && gdbarch_register_name (gdbarch, regno) != NULL
3928 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3929 fprintf_unfiltered (gdb_stdlog, "(%s)",
3930 gdbarch_register_name (gdbarch, regno));
3931 else
3932 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3933 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3934 {
3935 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3936 int i, size = register_size (gdbarch, regno);
3937 gdb_byte buf[MAX_REGISTER_SIZE];
3938
3939 regcache_raw_collect (regcache, regno, buf);
3940 fprintf_unfiltered (gdb_stdlog, " = ");
3941 for (i = 0; i < size; i++)
3942 {
3943 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3944 }
3945 if (size <= sizeof (LONGEST))
3946 {
3947 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3948
3949 fprintf_unfiltered (gdb_stdlog, " %s %s",
3950 core_addr_to_string_nz (val), plongest (val));
3951 }
3952 }
3953 fprintf_unfiltered (gdb_stdlog, "\n");
3954 }
3955
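/* Fetch register REGNO into REGCACHE from the first target layer
   that implements to_fetch_registers.  */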
3956 void
3957 target_fetch_registers (struct regcache *regcache, int regno)
3958 {
3959 struct target_ops *t;
3960
3961 for (t = current_target.beneath; t != NULL; t = t->beneath)
3962 {
3963 if (t->to_fetch_registers != NULL)
3964 {
3965 t->to_fetch_registers (t, regcache, regno);
3966 if (targetdebug)
3967 debug_print_register ("target_fetch_registers", regcache, regno);
3968 return;
3969 }
3970 }
3971 }
3972
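/* Write register REGNO from REGCACHE back to the target.  Error out
   if the "may-write-registers" permission is off, or (via noprocess)
   if no target layer implements to_store_registers.  */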
3973 void
3974 target_store_registers (struct regcache *regcache, int regno)
3975 {
3976 struct target_ops *t;
3977
3978 if (!may_write_registers)
3979 error (_("Writing to registers is not allowed (regno %d)"), regno);
3980
3981 for (t = current_target.beneath; t != NULL; t = t->beneath)
3982 {
3983 if (t->to_store_registers != NULL)
3984 {
3985 t->to_store_registers (t, regcache, regno);
3986 if (targetdebug)
3987 {
3988 debug_print_register ("target_store_registers", regcache, regno);
3989 }
3990 return;
3991 }
3992 }
3993
3994 noprocess ();
3995 }
3996
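/* Return the CPU core that thread PTID was last seen running on, or
   -1 if no target layer can tell.  */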
3997 int
3998 target_core_of_thread (ptid_t ptid)
3999 {
4000 struct target_ops *t;
4001
4002 for (t = current_target.beneath; t != NULL; t = t->beneath)
4003 {
4004 if (t->to_core_of_thread != NULL)
4005 {
4006 int retval = t->to_core_of_thread (t, ptid);
4007
4008 if (targetdebug)
4009 fprintf_unfiltered (gdb_stdlog,
4010 "target_core_of_thread (%d) = %d\n",
4011 PIDGET (ptid), retval);
4012 return retval;
4013 }
4014 }
4015
4016 return -1;
4017 }
4018
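/* Compare SIZE bytes of target memory at MEMADDR against DATA, using
   the first target layer that implements to_verify_memory; complain
   if none does.  */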
4019 int
4020 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4021 {
4022 struct target_ops *t;
4023
4024 for (t = current_target.beneath; t != NULL; t = t->beneath)
4025 {
4026 if (t->to_verify_memory != NULL)
4027 {
4028 int retval = t->to_verify_memory (t, data, memaddr, size);
4029
4030 if (targetdebug)
4031 fprintf_unfiltered (gdb_stdlog,
4032 "target_verify_memory (%s, %s) = %d\n",
4033 paddress (target_gdbarch (), memaddr),
4034 pulongest (size),
4035 retval);
4036 return retval;
4037 }
4038 }
4039
4040 tcomplain ();
4041 }
4042
4043 /* The documentation for this function is in its prototype declaration in
4044 target.h. */
4045
4046 int
4047 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4048 {
4049 struct target_ops *t;
4050
4051 for (t = current_target.beneath; t != NULL; t = t->beneath)
4052 if (t->to_insert_mask_watchpoint != NULL)
4053 {
4054 int ret;
4055
4056 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4057
4058 if (targetdebug)
4059 fprintf_unfiltered (gdb_stdlog, "\
4060 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4061 core_addr_to_string (addr),
4062 core_addr_to_string (mask), rw, ret);
4063
4064 return ret;
4065 }
4066
4067 return 1;
4068 }
4069
4070 /* The documentation for this function is in its prototype declaration in
4071 target.h. */
4072
4073 int
4074 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4075 {
4076 struct target_ops *t;
4077
4078 for (t = current_target.beneath; t != NULL; t = t->beneath)
4079 if (t->to_remove_mask_watchpoint != NULL)
4080 {
4081 int ret;
4082
4083 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4084
4085 if (targetdebug)
4086 fprintf_unfiltered (gdb_stdlog, "\
4087 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4088 core_addr_to_string (addr),
4089 core_addr_to_string (mask), rw, ret);
4090
4091 return ret;
4092 }
4093
4094 return 1;
4095 }
4096
4097 /* The documentation for this function is in its prototype declaration
4098 in target.h. */
4099
4100 int
4101 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4102 {
4103 struct target_ops *t;
4104
4105 for (t = current_target.beneath; t != NULL; t = t->beneath)
4106 if (t->to_masked_watch_num_registers != NULL)
4107 return t->to_masked_watch_num_registers (t, addr, mask);
4108
4109 return -1;
4110 }
4111
4112 /* The documentation for this function is in its prototype declaration
4113 in target.h. */
4114
4115 int
4116 target_ranged_break_num_registers (void)
4117 {
4118 struct target_ops *t;
4119
4120 for (t = current_target.beneath; t != NULL; t = t->beneath)
4121 if (t->to_ranged_break_num_registers != NULL)
4122 return t->to_ranged_break_num_registers (t);
4123
4124 return -1;
4125 }
4126
4127 /* See target.h. */
4128
4129 int
4130 target_supports_btrace (void)
4131 {
4132 struct target_ops *t;
4133
4134 for (t = current_target.beneath; t != NULL; t = t->beneath)
4135 if (t->to_supports_btrace != NULL)
4136 return t->to_supports_btrace ();
4137
4138 return 0;
4139 }
4140
4141 /* See target.h. */
4142
4143 struct btrace_target_info *
4144 target_enable_btrace (ptid_t ptid)
4145 {
4146 struct target_ops *t;
4147
4148 for (t = current_target.beneath; t != NULL; t = t->beneath)
4149 if (t->to_enable_btrace != NULL)
4150 return t->to_enable_btrace (ptid);
4151
4152 tcomplain ();
4153 return NULL;
4154 }
4155
4156 /* See target.h. */
4157
4158 void
4159 target_disable_btrace (struct btrace_target_info *btinfo)
4160 {
4161 struct target_ops *t;
4162
4163 for (t = current_target.beneath; t != NULL; t = t->beneath)
4164 if (t->to_disable_btrace != NULL)
4165 return t->to_disable_btrace (btinfo);
4166
4167 tcomplain ();
4168 }
4169
4170 /* See target.h. */
4171
4172 void
4173 target_teardown_btrace (struct btrace_target_info *btinfo)
4174 {
4175 struct target_ops *t;
4176
4177 for (t = current_target.beneath; t != NULL; t = t->beneath)
4178 if (t->to_teardown_btrace != NULL)
4179 return t->to_teardown_btrace (btinfo);
4180
4181 tcomplain ();
4182 }
4183
4184 /* See target.h. */
4185
4186 VEC (btrace_block_s) *
4187 target_read_btrace (struct btrace_target_info *btinfo,
4188 enum btrace_read_type type)
4189 {
4190 struct target_ops *t;
4191
4192 for (t = current_target.beneath; t != NULL; t = t->beneath)
4193 if (t->to_read_btrace != NULL)
4194 return t->to_read_btrace (btinfo, type);
4195
4196 tcomplain ();
4197 return NULL;
4198 }
4199
4200 /* See target.h. */
4201
4202 void
4203 target_stop_recording (void)
4204 {
4205 struct target_ops *t;
4206
4207 for (t = current_target.beneath; t != NULL; t = t->beneath)
4208 if (t->to_stop_recording != NULL)
4209 {
4210 t->to_stop_recording ();
4211 return;
4212 }
4213
4214 /* This is optional. */
4215 }
4216
4217 /* See target.h. */
4218
4219 void
4220 target_info_record (void)
4221 {
4222 struct target_ops *t;
4223
4224 for (t = current_target.beneath; t != NULL; t = t->beneath)
4225 if (t->to_info_record != NULL)
4226 {
4227 t->to_info_record ();
4228 return;
4229 }
4230
4231 tcomplain ();
4232 }
4233
4234 /* See target.h. */
4235
4236 void
4237 target_save_record (const char *filename)
4238 {
4239 struct target_ops *t;
4240
4241 for (t = current_target.beneath; t != NULL; t = t->beneath)
4242 if (t->to_save_record != NULL)
4243 {
4244 t->to_save_record (filename);
4245 return;
4246 }
4247
4248 tcomplain ();
4249 }
4250
4251 /* See target.h. */
4252
4253 int
4254 target_supports_delete_record (void)
4255 {
4256 struct target_ops *t;
4257
4258 for (t = current_target.beneath; t != NULL; t = t->beneath)
4259 if (t->to_delete_record != NULL)
4260 return 1;
4261
4262 return 0;
4263 }
4264
4265 /* See target.h. */
4266
4267 void
4268 target_delete_record (void)
4269 {
4270 struct target_ops *t;
4271
4272 for (t = current_target.beneath; t != NULL; t = t->beneath)
4273 if (t->to_delete_record != NULL)
4274 {
4275 t->to_delete_record ();
4276 return;
4277 }
4278
4279 tcomplain ();
4280 }
4281
4282 /* See target.h. */
4283
4284 int
4285 target_record_is_replaying (void)
4286 {
4287 struct target_ops *t;
4288
4289 for (t = current_target.beneath; t != NULL; t = t->beneath)
4290 if (t->to_record_is_replaying != NULL)
4291 return t->to_record_is_replaying ();
4292
4293 return 0;
4294 }
4295
4296 /* See target.h. */
4297
4298 void
4299 target_goto_record_begin (void)
4300 {
4301 struct target_ops *t;
4302
4303 for (t = current_target.beneath; t != NULL; t = t->beneath)
4304 if (t->to_goto_record_begin != NULL)
4305 {
4306 t->to_goto_record_begin ();
4307 return;
4308 }
4309
4310 tcomplain ();
4311 }
4312
4313 /* See target.h. */
4314
4315 void
4316 target_goto_record_end (void)
4317 {
4318 struct target_ops *t;
4319
4320 for (t = current_target.beneath; t != NULL; t = t->beneath)
4321 if (t->to_goto_record_end != NULL)
4322 {
4323 t->to_goto_record_end ();
4324 return;
4325 }
4326
4327 tcomplain ();
4328 }
4329
4330 /* See target.h. */
4331
4332 void
4333 target_goto_record (ULONGEST insn)
4334 {
4335 struct target_ops *t;
4336
4337 for (t = current_target.beneath; t != NULL; t = t->beneath)
4338 if (t->to_goto_record != NULL)
4339 {
4340 t->to_goto_record (insn);
4341 return;
4342 }
4343
4344 tcomplain ();
4345 }
4346
4347 /* See target.h. */
4348
4349 void
4350 target_insn_history (int size, int flags)
4351 {
4352 struct target_ops *t;
4353
4354 for (t = current_target.beneath; t != NULL; t = t->beneath)
4355 if (t->to_insn_history != NULL)
4356 {
4357 t->to_insn_history (size, flags);
4358 return;
4359 }
4360
4361 tcomplain ();
4362 }
4363
4364 /* See target.h. */
4365
4366 void
4367 target_insn_history_from (ULONGEST from, int size, int flags)
4368 {
4369 struct target_ops *t;
4370
4371 for (t = current_target.beneath; t != NULL; t = t->beneath)
4372 if (t->to_insn_history_from != NULL)
4373 {
4374 t->to_insn_history_from (from, size, flags);
4375 return;
4376 }
4377
4378 tcomplain ();
4379 }
4380
4381 /* See target.h. */
4382
4383 void
4384 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4385 {
4386 struct target_ops *t;
4387
4388 for (t = current_target.beneath; t != NULL; t = t->beneath)
4389 if (t->to_insn_history_range != NULL)
4390 {
4391 t->to_insn_history_range (begin, end, flags);
4392 return;
4393 }
4394
4395 tcomplain ();
4396 }
4397
4398 /* See target.h. */
4399
4400 void
4401 target_call_history (int size, int flags)
4402 {
4403 struct target_ops *t;
4404
4405 for (t = current_target.beneath; t != NULL; t = t->beneath)
4406 if (t->to_call_history != NULL)
4407 {
4408 t->to_call_history (size, flags);
4409 return;
4410 }
4411
4412 tcomplain ();
4413 }
4414
4415 /* See target.h. */
4416
4417 void
4418 target_call_history_from (ULONGEST begin, int size, int flags)
4419 {
4420 struct target_ops *t;
4421
4422 for (t = current_target.beneath; t != NULL; t = t->beneath)
4423 if (t->to_call_history_from != NULL)
4424 {
4425 t->to_call_history_from (begin, size, flags);
4426 return;
4427 }
4428
4429 tcomplain ();
4430 }
4431
4432 /* See target.h. */
4433
4434 void
4435 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4436 {
4437 struct target_ops *t;
4438
4439 for (t = current_target.beneath; t != NULL; t = t->beneath)
4440 if (t->to_call_history_range != NULL)
4441 {
4442 t->to_call_history_range (begin, end, flags);
4443 return;
4444 }
4445
4446 tcomplain ();
4447 }
4448
4449 static void
4450 debug_to_prepare_to_store (struct regcache *regcache)
4451 {
4452 debug_target.to_prepare_to_store (regcache);
4453
4454 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4455 }
4456
4457 static int
4458 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4459 int write, struct mem_attrib *attrib,
4460 struct target_ops *target)
4461 {
4462 int retval;
4463
4464 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4465 attrib, target);
4466
4467 fprintf_unfiltered (gdb_stdlog,
4468 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4469 paddress (target_gdbarch (), memaddr), len,
4470 write ? "write" : "read", retval);
4471
4472 if (retval > 0)
4473 {
4474 int i;
4475
4476 fputs_unfiltered (", bytes =", gdb_stdlog);
4477 for (i = 0; i < retval; i++)
4478 {
4479 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4480 {
4481 if (targetdebug < 2 && i > 0)
4482 {
4483 fprintf_unfiltered (gdb_stdlog, " ...");
4484 break;
4485 }
4486 fprintf_unfiltered (gdb_stdlog, "\n");
4487 }
4488
4489 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4490 }
4491 }
4492
4493 fputc_unfiltered ('\n', gdb_stdlog);
4494
4495 return retval;
4496 }
4497
4498 static void
4499 debug_to_files_info (struct target_ops *target)
4500 {
4501 debug_target.to_files_info (target);
4502
4503 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4504 }
4505
4506 static int
4507 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4508 struct bp_target_info *bp_tgt)
4509 {
4510 int retval;
4511
4512 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4513
4514 fprintf_unfiltered (gdb_stdlog,
4515 "target_insert_breakpoint (%s, xxx) = %ld\n",
4516 core_addr_to_string (bp_tgt->placed_address),
4517 (unsigned long) retval);
4518 return retval;
4519 }
4520
4521 static int
4522 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4523 struct bp_target_info *bp_tgt)
4524 {
4525 int retval;
4526
4527 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4528
4529 fprintf_unfiltered (gdb_stdlog,
4530 "target_remove_breakpoint (%s, xxx) = %ld\n",
4531 core_addr_to_string (bp_tgt->placed_address),
4532 (unsigned long) retval);
4533 return retval;
4534 }
4535
4536 static int
4537 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4538 {
4539 int retval;
4540
4541 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4542
4543 fprintf_unfiltered (gdb_stdlog,
4544 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4545 (unsigned long) type,
4546 (unsigned long) cnt,
4547 (unsigned long) from_tty,
4548 (unsigned long) retval);
4549 return retval;
4550 }
4551
4552 static int
4553 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4554 {
4555 CORE_ADDR retval;
4556
4557 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4558
4559 fprintf_unfiltered (gdb_stdlog,
4560 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4561 core_addr_to_string (addr), (unsigned long) len,
4562 core_addr_to_string (retval));
4563 return retval;
4564 }
4565
4566 static int
4567 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4568 struct expression *cond)
4569 {
4570 int retval;
4571
4572 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4573 rw, cond);
4574
4575 fprintf_unfiltered (gdb_stdlog,
4576 "target_can_accel_watchpoint_condition "
4577 "(%s, %d, %d, %s) = %ld\n",
4578 core_addr_to_string (addr), len, rw,
4579 host_address_to_string (cond), (unsigned long) retval);
4580 return retval;
4581 }
4582
4583 static int
4584 debug_to_stopped_by_watchpoint (void)
4585 {
4586 int retval;
4587
4588 retval = debug_target.to_stopped_by_watchpoint ();
4589
4590 fprintf_unfiltered (gdb_stdlog,
4591 "target_stopped_by_watchpoint () = %ld\n",
4592 (unsigned long) retval);
4593 return retval;
4594 }
4595
4596 static int
4597 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4598 {
4599 int retval;
4600
4601 retval = debug_target.to_stopped_data_address (target, addr);
4602
4603 fprintf_unfiltered (gdb_stdlog,
4604 "target_stopped_data_address ([%s]) = %ld\n",
4605 core_addr_to_string (*addr),
4606 (unsigned long)retval);
4607 return retval;
4608 }
4609
4610 static int
4611 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4612 CORE_ADDR addr,
4613 CORE_ADDR start, int length)
4614 {
4615 int retval;
4616
4617 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4618 start, length);
4619
4620 fprintf_unfiltered (gdb_stdlog,
4621 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4622 core_addr_to_string (addr), core_addr_to_string (start),
4623 length, retval);
4624 return retval;
4625 }
4626
4627 static int
4628 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4629 struct bp_target_info *bp_tgt)
4630 {
4631 int retval;
4632
4633 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4634
4635 fprintf_unfiltered (gdb_stdlog,
4636 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4637 core_addr_to_string (bp_tgt->placed_address),
4638 (unsigned long) retval);
4639 return retval;
4640 }
4641
4642 static int
4643 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4644 struct bp_target_info *bp_tgt)
4645 {
4646 int retval;
4647
4648 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4649
4650 fprintf_unfiltered (gdb_stdlog,
4651 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4652 core_addr_to_string (bp_tgt->placed_address),
4653 (unsigned long) retval);
4654 return retval;
4655 }
4656
4657 static int
4658 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4659 struct expression *cond)
4660 {
4661 int retval;
4662
4663 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4664
4665 fprintf_unfiltered (gdb_stdlog,
4666 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4667 core_addr_to_string (addr), len, type,
4668 host_address_to_string (cond), (unsigned long) retval);
4669 return retval;
4670 }
4671
4672 static int
4673 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4674 struct expression *cond)
4675 {
4676 int retval;
4677
4678 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4679
4680 fprintf_unfiltered (gdb_stdlog,
4681 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4682 core_addr_to_string (addr), len, type,
4683 host_address_to_string (cond), (unsigned long) retval);
4684 return retval;
4685 }
4686
4687 static void
4688 debug_to_terminal_init (void)
4689 {
4690 debug_target.to_terminal_init ();
4691
4692 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4693 }
4694
4695 static void
4696 debug_to_terminal_inferior (void)
4697 {
4698 debug_target.to_terminal_inferior ();
4699
4700 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4701 }
4702
4703 static void
4704 debug_to_terminal_ours_for_output (void)
4705 {
4706 debug_target.to_terminal_ours_for_output ();
4707
4708 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4709 }
4710
4711 static void
4712 debug_to_terminal_ours (void)
4713 {
4714 debug_target.to_terminal_ours ();
4715
4716 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4717 }
4718
4719 static void
4720 debug_to_terminal_save_ours (void)
4721 {
4722 debug_target.to_terminal_save_ours ();
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4725 }
4726
4727 static void
4728 debug_to_terminal_info (const char *arg, int from_tty)
4729 {
4730 debug_target.to_terminal_info (arg, from_tty);
4731
4732 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4733 from_tty);
4734 }
4735
4736 static void
4737 debug_to_load (char *args, int from_tty)
4738 {
4739 debug_target.to_load (args, from_tty);
4740
4741 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4742 }
4743
4744 static void
4745 debug_to_post_startup_inferior (ptid_t ptid)
4746 {
4747 debug_target.to_post_startup_inferior (ptid);
4748
4749 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4750 PIDGET (ptid));
4751 }
4752
4753 static int
4754 debug_to_insert_fork_catchpoint (int pid)
4755 {
4756 int retval;
4757
4758 retval = debug_target.to_insert_fork_catchpoint (pid);
4759
4760 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4761 pid, retval);
4762
4763 return retval;
4764 }
4765
4766 static int
4767 debug_to_remove_fork_catchpoint (int pid)
4768 {
4769 int retval;
4770
4771 retval = debug_target.to_remove_fork_catchpoint (pid);
4772
4773 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4774 pid, retval);
4775
4776 return retval;
4777 }
4778
4779 static int
4780 debug_to_insert_vfork_catchpoint (int pid)
4781 {
4782 int retval;
4783
4784 retval = debug_target.to_insert_vfork_catchpoint (pid);
4785
4786 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4787 pid, retval);
4788
4789 return retval;
4790 }
4791
4792 static int
4793 debug_to_remove_vfork_catchpoint (int pid)
4794 {
4795 int retval;
4796
4797 retval = debug_target.to_remove_vfork_catchpoint (pid);
4798
4799 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4800 pid, retval);
4801
4802 return retval;
4803 }
4804
4805 static int
4806 debug_to_insert_exec_catchpoint (int pid)
4807 {
4808 int retval;
4809
4810 retval = debug_target.to_insert_exec_catchpoint (pid);
4811
4812 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4813 pid, retval);
4814
4815 return retval;
4816 }
4817
4818 static int
4819 debug_to_remove_exec_catchpoint (int pid)
4820 {
4821 int retval;
4822
4823 retval = debug_target.to_remove_exec_catchpoint (pid);
4824
4825 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4826 pid, retval);
4827
4828 return retval;
4829 }
4830
4831 static int
4832 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4833 {
4834 int has_exited;
4835
4836 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4837
4838 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4839 pid, wait_status, *exit_status, has_exited);
4840
4841 return has_exited;
4842 }
4843
4844 static int
4845 debug_to_can_run (void)
4846 {
4847 int retval;
4848
4849 retval = debug_target.to_can_run ();
4850
4851 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4852
4853 return retval;
4854 }
4855
4856 static struct gdbarch *
4857 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4858 {
4859 struct gdbarch *retval;
4860
4861 retval = debug_target.to_thread_architecture (ops, ptid);
4862
4863 fprintf_unfiltered (gdb_stdlog,
4864 "target_thread_architecture (%s) = %s [%s]\n",
4865 target_pid_to_str (ptid),
4866 host_address_to_string (retval),
4867 gdbarch_bfd_arch_info (retval)->printable_name);
4868 return retval;
4869 }
4870
4871 static void
4872 debug_to_stop (ptid_t ptid)
4873 {
4874 debug_target.to_stop (ptid);
4875
4876 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4877 target_pid_to_str (ptid));
4878 }
4879
4880 static void
4881 debug_to_rcmd (char *command,
4882 struct ui_file *outbuf)
4883 {
4884 debug_target.to_rcmd (command, outbuf);
4885 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4886 }
4887
4888 static char *
4889 debug_to_pid_to_exec_file (int pid)
4890 {
4891 char *exec_file;
4892
4893 exec_file = debug_target.to_pid_to_exec_file (pid);
4894
4895 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4896 pid, exec_file);
4897
4898 return exec_file;
4899 }
4900
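/* Save a copy of the current target vector in DEBUG_TARGET and
   install the debug_to_* wrappers in CURRENT_TARGET so that target
   calls are traced to gdb_stdlog.  */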
4901 static void
4902 setup_target_debug (void)
4903 {
4904 memcpy (&debug_target, &current_target, sizeof debug_target);
4905
4906 current_target.to_open = debug_to_open;
4907 current_target.to_post_attach = debug_to_post_attach;
4908 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4909 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4910 current_target.to_files_info = debug_to_files_info;
4911 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4912 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4913 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4914 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4915 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4916 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4917 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4918 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4919 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4920 current_target.to_watchpoint_addr_within_range
4921 = debug_to_watchpoint_addr_within_range;
4922 current_target.to_region_ok_for_hw_watchpoint
4923 = debug_to_region_ok_for_hw_watchpoint;
4924 current_target.to_can_accel_watchpoint_condition
4925 = debug_to_can_accel_watchpoint_condition;
4926 current_target.to_terminal_init = debug_to_terminal_init;
4927 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4928 current_target.to_terminal_ours_for_output
4929 = debug_to_terminal_ours_for_output;
4930 current_target.to_terminal_ours = debug_to_terminal_ours;
4931 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4932 current_target.to_terminal_info = debug_to_terminal_info;
4933 current_target.to_load = debug_to_load;
4934 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4935 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4936 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4937 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4938 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4939 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4940 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4941 current_target.to_has_exited = debug_to_has_exited;
4942 current_target.to_can_run = debug_to_can_run;
4943 current_target.to_stop = debug_to_stop;
4944 current_target.to_rcmd = debug_to_rcmd;
4945 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4946 current_target.to_thread_architecture = debug_to_thread_architecture;
4947 }
4948 \f
4949
4950 static char targ_desc[] =
4951 "Names of targets and files being debugged.\nShows the entire \
4952 stack of targets currently in use (including the exec-file,\n\
4953 core-file, and process, if any), as well as the symbol file name.";
4954
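/* Implement the "monitor" command: forward CMD to the target's
   to_rcmd method, erroring out if neither the current target nor the
   target beneath the debug wrapper supports it.  */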
4955 static void
4956 do_monitor_command (char *cmd,
4957 int from_tty)
4958 {
4959 if ((current_target.to_rcmd
4960 == (void (*) (char *, struct ui_file *)) tcomplain)
4961 || (current_target.to_rcmd == debug_to_rcmd
4962 && (debug_target.to_rcmd
4963 == (void (*) (char *, struct ui_file *)) tcomplain)))
4964 error (_("\"monitor\" command not supported by this target."));
4965 target_rcmd (cmd, gdb_stdtarg);
4966 }
4967
4968 /* Print the name of each layer of our target stack.  */
4969
4970 static void
4971 maintenance_print_target_stack (char *cmd, int from_tty)
4972 {
4973 struct target_ops *t;
4974
4975 printf_filtered (_("The current target stack is:\n"));
4976
4977 for (t = target_stack; t != NULL; t = t->beneath)
4978 {
4979 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4980 }
4981 }
4982
4983 /* Controls whether async mode is permitted.  */
4984 int target_async_permitted = 0;
4985
4986 /* The set command writes to this variable. If the inferior is
4987 executing, target_async_permitted is *not* updated. */
4988 static int target_async_permitted_1 = 0;
4989
4990 static void
4991 set_target_async_command (char *args, int from_tty,
4992 struct cmd_list_element *c)
4993 {
4994 if (have_live_inferiors ())
4995 {
4996 target_async_permitted_1 = target_async_permitted;
4997 error (_("Cannot change this setting while the inferior is running."));
4998 }
4999
5000 target_async_permitted = target_async_permitted_1;
5001 }
5002
5003 static void
5004 show_target_async_command (struct ui_file *file, int from_tty,
5005 struct cmd_list_element *c,
5006 const char *value)
5007 {
5008 fprintf_filtered (file,
5009 _("Controlling the inferior in "
5010 "asynchronous mode is %s.\n"), value);
5011 }
5012
5013 /* Temporary copies of permission settings. */
5014
5015 static int may_write_registers_1 = 1;
5016 static int may_write_memory_1 = 1;
5017 static int may_insert_breakpoints_1 = 1;
5018 static int may_insert_tracepoints_1 = 1;
5019 static int may_insert_fast_tracepoints_1 = 1;
5020 static int may_stop_1 = 1;
5021
5022 /* Make the user-set values match the real values again. */
5023
5024 void
5025 update_target_permissions (void)
5026 {
5027 may_write_registers_1 = may_write_registers;
5028 may_write_memory_1 = may_write_memory;
5029 may_insert_breakpoints_1 = may_insert_breakpoints;
5030 may_insert_tracepoints_1 = may_insert_tracepoints;
5031 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5032 may_stop_1 = may_stop;
5033 }
5034
5035 /* This one function handles (most of) the permission flags in the
5036 same way.  */
5037
5038 static void
5039 set_target_permissions (char *args, int from_tty,
5040 struct cmd_list_element *c)
5041 {
5042 if (target_has_execution)
5043 {
5044 update_target_permissions ();
5045 error (_("Cannot change this setting while the inferior is running."));
5046 }
5047
5048 /* Make the real values match the user-changed values. */
5049 may_write_registers = may_write_registers_1;
5050 may_insert_breakpoints = may_insert_breakpoints_1;
5051 may_insert_tracepoints = may_insert_tracepoints_1;
5052 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5053 may_stop = may_stop_1;
5054 update_observer_mode ();
5055 }
5056
5057 /* Set memory write permission independently of observer mode. */
5058
5059 static void
5060 set_write_memory_permission (char *args, int from_tty,
5061 struct cmd_list_element *c)
5062 {
5063 /* Make the real values match the user-changed values. */
5064 may_write_memory = may_write_memory_1;
5065 update_observer_mode ();
5066 }
5067
5068
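/* Module initialization: push the dummy target and register the
   target-related info, maintenance, "monitor", and set/show
   commands.  */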
5069 void
5070 initialize_targets (void)
5071 {
5072 init_dummy_target ();
5073 push_target (&dummy_target);
5074
5075 add_info ("target", target_info, targ_desc);
5076 add_info ("files", target_info, targ_desc);
5077
5078 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5079 Set target debugging."), _("\
5080 Show target debugging."), _("\
5081 When non-zero, target debugging is enabled. Higher numbers are more\n\
5082 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5083 command."),
5084 NULL,
5085 show_targetdebug,
5086 &setdebuglist, &showdebuglist);
5087
5088 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5089 &trust_readonly, _("\
5090 Set mode for reading from readonly sections."), _("\
5091 Show mode for reading from readonly sections."), _("\
5092 When this mode is on, memory reads from readonly sections (such as .text)\n\
5093 will be read from the object file instead of from the target. This will\n\
5094 result in significant performance improvement for remote targets."),
5095 NULL,
5096 show_trust_readonly,
5097 &setlist, &showlist);
5098
5099 add_com ("monitor", class_obscure, do_monitor_command,
5100 _("Send a command to the remote monitor (remote targets only)."));
5101
5102 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5103 _("Print the name of each layer of the internal target stack."),
5104 &maintenanceprintlist);
5105
5106 add_setshow_boolean_cmd ("target-async", no_class,
5107 &target_async_permitted_1, _("\
5108 Set whether gdb controls the inferior in asynchronous mode."), _("\
5109 Show whether gdb controls the inferior in asynchronous mode."), _("\
5110 Tells gdb whether to control the inferior in asynchronous mode."),
5111 set_target_async_command,
5112 show_target_async_command,
5113 &setlist,
5114 &showlist);
5115
5116 add_setshow_boolean_cmd ("stack-cache", class_support,
5117 &stack_cache_enabled_p_1, _("\
5118 Set cache use for stack access."), _("\
5119 Show cache use for stack access."), _("\
5120 When on, use the data cache for all stack access, regardless of any\n\
5121 configured memory regions. This improves remote performance significantly.\n\
5122 By default, caching for stack access is on."),
5123 set_stack_cache_enabled_p,
5124 show_stack_cache_enabled_p,
5125 &setlist, &showlist);
5126
5127 add_setshow_boolean_cmd ("may-write-registers", class_support,
5128 &may_write_registers_1, _("\
5129 Set permission to write into registers."), _("\
5130 Show permission to write into registers."), _("\
5131 When this permission is on, GDB may write into the target's registers.\n\
5132 Otherwise, any sort of write attempt will result in an error."),
5133 set_target_permissions, NULL,
5134 &setlist, &showlist);
5135
5136 add_setshow_boolean_cmd ("may-write-memory", class_support,
5137 &may_write_memory_1, _("\
5138 Set permission to write into target memory."), _("\
5139 Show permission to write into target memory."), _("\
5140 When this permission is on, GDB may write into the target's memory.\n\
5141 Otherwise, any sort of write attempt will result in an error."),
5142 set_write_memory_permission, NULL,
5143 &setlist, &showlist);
5144
5145 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5146 &may_insert_breakpoints_1, _("\
5147 Set permission to insert breakpoints in the target."), _("\
5148 Show permission to insert breakpoints in the target."), _("\
5149 When this permission is on, GDB may insert breakpoints in the program.\n\
5150 Otherwise, any sort of insertion attempt will result in an error."),
5151 set_target_permissions, NULL,
5152 &setlist, &showlist);
5153
5154 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5155 &may_insert_tracepoints_1, _("\
5156 Set permission to insert tracepoints in the target."), _("\
5157 Show permission to insert tracepoints in the target."), _("\
5158 When this permission is on, GDB may insert tracepoints in the program.\n\
5159 Otherwise, any sort of insertion attempt will result in an error."),
5160 set_target_permissions, NULL,
5161 &setlist, &showlist);
5162
5163 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5164 &may_insert_fast_tracepoints_1, _("\
5165 Set permission to insert fast tracepoints in the target."), _("\
5166 Show permission to insert fast tracepoints in the target."), _("\
5167 When this permission is on, GDB may insert fast tracepoints.\n\
5168 Otherwise, any sort of insertion attempt will result in an error."),
5169 set_target_permissions, NULL,
5170 &setlist, &showlist);
5171
5172 add_setshow_boolean_cmd ("may-interrupt", class_support,
5173 &may_stop_1, _("\
5174 Set permission to interrupt or signal the target."), _("\
5175 Show permission to interrupt or signal the target."), _("\
5176 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5177 Otherwise, any attempt to interrupt or stop will be ignored."),
5178 set_target_permissions, NULL,
5179 &setlist, &showlist);
5180
5181
5182 target_dcache = dcache_init ();
5183 }