/* Source provenance: gdb/target.c from the deliverable/binutils-gdb.git
   repository, at the commit "Add new target_read_raw_memory function, and
   consolidate comments".  */
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 void target_ignore (void);
68
69 static void target_command (char *, int);
70
71 static struct target_ops *find_default_run_target (char *);
72
73 static LONGEST default_xfer_partial (struct target_ops *ops,
74 enum target_object object,
75 const char *annex, gdb_byte *readbuf,
76 const gdb_byte *writebuf,
77 ULONGEST offset, LONGEST len);
78
79 static LONGEST current_xfer_partial (struct target_ops *ops,
80 enum target_object object,
81 const char *annex, gdb_byte *readbuf,
82 const gdb_byte *writebuf,
83 ULONGEST offset, LONGEST len);
84
85 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
86 ptid_t ptid);
87
88 static void init_dummy_target (void);
89
90 static struct target_ops debug_target;
91
92 static void debug_to_open (char *, int);
93
94 static void debug_to_prepare_to_store (struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (int, int, int);
105
106 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
113 struct expression *);
114
115 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
116 struct expression *);
117
118 static int debug_to_stopped_by_watchpoint (void);
119
120 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
121
122 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
123 CORE_ADDR, CORE_ADDR, int);
124
125 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
126
127 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
128 struct expression *);
129
130 static void debug_to_terminal_init (void);
131
132 static void debug_to_terminal_inferior (void);
133
134 static void debug_to_terminal_ours_for_output (void);
135
136 static void debug_to_terminal_save_ours (void);
137
138 static void debug_to_terminal_ours (void);
139
140 static void debug_to_load (char *, int);
141
142 static int debug_to_can_run (void);
143
144 static void debug_to_stop (ptid_t);
145
146 /* Pointer to array of target architecture structures; the size of the
147 array; the current index into the array; the allocated size of the
148 array. */
149 struct target_ops **target_structs;
150 unsigned target_struct_size;
151 unsigned target_struct_allocsize;
152 #define DEFAULT_ALLOCSIZE 10
153
154 /* The initial current target, so that there is always a semi-valid
155 current target. */
156
157 static struct target_ops dummy_target;
158
159 /* Top of target stack. */
160
161 static struct target_ops *target_stack;
162
163 /* The target structure we are currently using to talk to a process
164 or file or whatever "inferior" we have. */
165
166 struct target_ops current_target;
167
168 /* Command list for target. */
169
170 static struct cmd_list_element *targetlist = NULL;
171
172 /* Nonzero if we should trust readonly sections from the
173 executable when reading memory. */
174
175 static int trust_readonly = 0;
176
177 /* Nonzero if we should show true memory content including
178 memory breakpoint inserted by gdb. */
179
180 static int show_memory_breakpoints = 0;
181
182 /* These globals control whether GDB attempts to perform these
183 operations; they are useful for targets that need to prevent
184 inadvertant disruption, such as in non-stop mode. */
185
186 int may_write_registers = 1;
187
188 int may_write_memory = 1;
189
190 int may_insert_breakpoints = 1;
191
192 int may_insert_tracepoints = 1;
193
194 int may_insert_fast_tracepoints = 1;
195
196 int may_stop = 1;
197
198 /* Non-zero if we want to see trace of target level stuff. */
199
200 static unsigned int targetdebug = 0;
/* "show debug target" callback: report the current target debugging
   level (the string form of the "targetdebug" setting).  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
207
208 static void setup_target_debug (void);
209
/* The user just typed 'target' without the name of a target.
   "target" is only a prefix command; tell the user a subcommand
   (the target name) is required.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
218
219 /* Default target_has_* methods for process_stratum targets. */
220
221 int
222 default_child_has_all_memory (struct target_ops *ops)
223 {
224 /* If no inferior selected, then we can't read memory here. */
225 if (ptid_equal (inferior_ptid, null_ptid))
226 return 0;
227
228 return 1;
229 }
230
231 int
232 default_child_has_memory (struct target_ops *ops)
233 {
234 /* If no inferior selected, then we can't read memory here. */
235 if (ptid_equal (inferior_ptid, null_ptid))
236 return 0;
237
238 return 1;
239 }
240
241 int
242 default_child_has_stack (struct target_ops *ops)
243 {
244 /* If no inferior selected, there's no stack. */
245 if (ptid_equal (inferior_ptid, null_ptid))
246 return 0;
247
248 return 1;
249 }
250
251 int
252 default_child_has_registers (struct target_ops *ops)
253 {
254 /* Can't read registers from no inferior. */
255 if (ptid_equal (inferior_ptid, null_ptid))
256 return 0;
257
258 return 1;
259 }
260
261 int
262 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
263 {
264 /* If there's no thread selected, then we can't make it run through
265 hoops. */
266 if (ptid_equal (the_ptid, null_ptid))
267 return 0;
268
269 return 1;
270 }
271
272
273 int
274 target_has_all_memory_1 (void)
275 {
276 struct target_ops *t;
277
278 for (t = current_target.beneath; t != NULL; t = t->beneath)
279 if (t->to_has_all_memory (t))
280 return 1;
281
282 return 0;
283 }
284
285 int
286 target_has_memory_1 (void)
287 {
288 struct target_ops *t;
289
290 for (t = current_target.beneath; t != NULL; t = t->beneath)
291 if (t->to_has_memory (t))
292 return 1;
293
294 return 0;
295 }
296
297 int
298 target_has_stack_1 (void)
299 {
300 struct target_ops *t;
301
302 for (t = current_target.beneath; t != NULL; t = t->beneath)
303 if (t->to_has_stack (t))
304 return 1;
305
306 return 0;
307 }
308
309 int
310 target_has_registers_1 (void)
311 {
312 struct target_ops *t;
313
314 for (t = current_target.beneath; t != NULL; t = t->beneath)
315 if (t->to_has_registers (t))
316 return 1;
317
318 return 0;
319 }
320
321 int
322 target_has_execution_1 (ptid_t the_ptid)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_execution (t, the_ptid))
328 return 1;
329
330 return 0;
331 }
332
/* Like target_has_execution_1, but for the currently selected
   inferior (inferior_ptid).  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
338
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): the casts below reuse return_zero for several
     distinct function-pointer signatures; this follows the file's
     established convention for default stubs.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
}
364
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Grow the global registry of targets on demand, doubling the
     allocation when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix
     command.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
405
/* Add a possible target architecture to the list, with no command
   completer.  Convenience wrapper around add_target_with_completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
413
/* See target.h.  Register ALIAS as a deprecated alias for the
   'target T->to_shortname' command.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here — presumably deprecate_cmd
     takes ownership of the string; verify against its definition.  */
  deprecate_cmd (c, alt);
}
428
429 /* Stub functions */
430
/* Do-nothing stub, installed as the default for target methods that
   may be safely ignored.  */

void
target_ignore (void)
{
}
435
/* Kill the inferior: walk the target stack top to bottom and
   dispatch to the first target implementing to_kill.  Errors with
   "no process" if no target on the stack can kill.  */

void
target_kill (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_kill != NULL)
      {
	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

	t->to_kill (t);
	return;
      }

  noprocess ();
}
453
/* Load a program into the inferior.  Invalidates the target dcache
   first, since loading changes target memory behind the cache's
   back, then delegates to the current target's to_load method.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
460
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV.  Dispatches to the first target on the stack that
   implements to_create_inferior; it is an internal error if none
   does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
483
/* Give the inferior control of the terminal, unless this is a
   background resume, in which case GDB keeps the terminal.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
498
/* Default deprecated_xfer_memory method: fail every transfer with
   EIO and report zero bytes handled.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
506
/* Default stub for operations the current target does not support:
   raise an error naming the current target.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
513
/* Raise an error for operations that require a live process when
   there is none.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
519
/* Default to_terminal_info method: there is no saved terminal state
   to describe.  */

static void
default_terminal_info (const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
525
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
537
/* Default to_execution_direction method: forward unless the target
   supports reverse execution, in which case it must provide its own
   implementation when running async.  */

static enum exec_direction_kind
default_execution_direction (void)
{
  if (!target_can_execute_reverse)
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    /* A reverse-capable async target cannot rely on this default.  */
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
549
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET into current_target, but only if no
     higher-stratum target has already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is special: it always gets the stack-walking
     default rather than an inherited entry.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    return_zero);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (void))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (void))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
957
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the first
     slot whose stratum is not above T's.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
1008
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1050
/* Unpush every target whose stratum is strictly above ABOVE_STRATUM.
   It is an internal error if the top of the stack cannot be
   unpushed.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1067
/* Unpush every target on the stack, leaving only the dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1073
1074 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1075
1076 int
1077 target_is_pushed (struct target_ops *t)
1078 {
1079 struct target_ops **cur;
1080
1081 /* Check magic number. If wrong, it probably means someone changed
1082 the struct definition, but not all the places that initialize one. */
1083 if (t->to_magic != OPS_MAGIC)
1084 {
1085 fprintf_unfiltered (gdb_stderr,
1086 "Magic number of %s target struct wrong\n",
1087 t->to_shortname);
1088 internal_error (__FILE__, __LINE__,
1089 _("failed internal consistency check"));
1090 }
1091
1092 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1093 if (*cur == t)
1094 return 1;
1095
1096 return 0;
1097 }
1098
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile because it is assigned inside TRY_CATCH and read
     afterwards; the qualifier keeps its value from being cached
     across the setjmp/longjmp that TRY_CATCH may perform.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that knows how to translate a
     TLS offset for a thread.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* The wording of the messages below depends on whether the
	     objfile is a shared library or the main executable.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher
		 catcher to handle.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1195
1196 const char *
1197 target_xfer_error_to_string (enum target_xfer_error err)
1198 {
1199 #define CASE(X) case X: return #X
1200 switch (err)
1201 {
1202 CASE(TARGET_XFER_E_IO);
1203 CASE(TARGET_XFER_E_UNAVAILABLE);
1204 default:
1205 return "<unknown>";
1206 }
1207 #undef CASE
1208 };
1209
1210
1211 #undef MIN
1212 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1213
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;		/* Current capacity of BUFFER.  */
  char *bufptr;			/* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a 4-byte-aligned chunk; TLEN is how many of those bytes
	 belong to the string, OFFSET where they start within BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping (and counting the terminator) if we
	 hit the NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* BUFFER is always handed to the caller, even on error, so the
     caller can free it unconditionally.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1286
1287 struct target_section_table *
1288 target_get_section_table (struct target_ops *target)
1289 {
1290 struct target_ops *t;
1291
1292 if (targetdebug)
1293 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1294
1295 for (t = target; t != NULL; t = t->beneath)
1296 if (t->to_get_section_table != NULL)
1297 return (*t->to_get_section_table) (t);
1298
1299 return NULL;
1300 }
1301
1302 /* Find a section containing ADDR. */
1303
1304 struct target_section *
1305 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1306 {
1307 struct target_section_table *table = target_get_section_table (target);
1308 struct target_section *secp;
1309
1310 if (table == NULL)
1311 return NULL;
1312
1313 for (secp = table->sections; secp < table->sections_end; secp++)
1314 {
1315 if (addr >= secp->addr && addr < secp->endaddr)
1316 return secp;
1317 }
1318 return NULL;
1319 }
1320
1321 /* Read memory from the live target, even if currently inspecting a
1322 traceframe. The return is the same as that of target_read. */
1323
1324 static LONGEST
1325 target_read_live_memory (enum target_object object,
1326 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1327 {
1328 LONGEST ret;
1329 struct cleanup *cleanup;
1330
1331 /* Switch momentarily out of tfind mode so to access live memory.
1332 Note that this must not clear global state, such as the frame
1333 cache, which must still remain valid for the previous traceframe.
1334 We may be _building_ the frame cache at this point. */
1335 cleanup = make_cleanup_restore_traceframe_number ();
1336 set_traceframe_number (-1);
1337
1338 ret = target_read (current_target.beneath, object, NULL,
1339 myaddr, memaddr, len);
1340
1341 do_cleanups (cleanup);
1342 return ret;
1343 }
1344
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls within a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	    }
	}
    }

  /* MEMADDR is not in any read-only section; nothing transferred.  */
  return 0;
}
1400
1401 /* Read memory from more than one valid target. A core file, for
1402 instance, could have some of memory but delegate other bits to
1403 the target below it. So, we must manually try all targets. */
1404
1405 static LONGEST
1406 raw_memory_xfer_partial (struct target_ops *ops, void *readbuf,
1407 const void *writebuf, ULONGEST memaddr, LONGEST len)
1408 {
1409 LONGEST res;
1410
1411 do
1412 {
1413 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1414 readbuf, writebuf, memaddr, len);
1415 if (res > 0)
1416 break;
1417
1418 /* We want to continue past core files to executables, but not
1419 past a running target's memory. */
1420 if (ops->to_has_all_memory (ops))
1421 break;
1422
1423 ops = ops->beneath;
1424 }
1425 while (ops != NULL);
1426
1427 return res;
1428 }
1429
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static LONGEST
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       void *readbuf, const void *writebuf, ULONGEST memaddr,
		       LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* If the start of the request isn't available in the
	     traceframe, read from live (read-only) memory instead.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr, len);
	      if (res > 0)
		return res;

	      /* No use trying further, we know some memory starting
		 at MEMADDR isn't available.  */
	      return TARGET_XFER_E_UNAVAILABLE;
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's configured access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();

      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	return res;
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.  */
  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1627
1628 /* Perform a partial memory transfer. For docs see target.h,
1629 to_xfer_partial. */
1630
1631 static LONGEST
1632 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1633 void *readbuf, const void *writebuf, ULONGEST memaddr,
1634 LONGEST len)
1635 {
1636 int res;
1637
1638 /* Zero length requests are ok and require no work. */
1639 if (len == 0)
1640 return 0;
1641
1642 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1643 breakpoint insns, thus hiding out from higher layers whether
1644 there are software breakpoints inserted in the code stream. */
1645 if (readbuf != NULL)
1646 {
1647 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1648
1649 if (res > 0 && !show_memory_breakpoints)
1650 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1651 }
1652 else
1653 {
1654 void *buf;
1655 struct cleanup *old_chain;
1656
1657 /* A large write request is likely to be partially satisfied
1658 by memory_xfer_partial_1. We will continually malloc
1659 and free a copy of the entire write request for breakpoint
1660 shadow handling even though we only end up writing a small
1661 subset of it. Cap writes to 4KB to mitigate this. */
1662 len = min (4096, len);
1663
1664 buf = xmalloc (len);
1665 old_chain = make_cleanup (xfree, buf);
1666 memcpy (buf, writebuf, len);
1667
1668 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1669 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1670
1671 do_cleanups (old_chain);
1672 }
1673
1674 return res;
1675 }
1676
/* Cleanup callback restoring show_memory_breakpoints to the value
   saved in ARG (encoded as a uintptr_t).  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1682
/* Set show_memory_breakpoints to SHOW and return a cleanup that
   restores the previous value when run.  */

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
		       (void *) (uintptr_t) current);
}
1692
/* For docs see target.h, to_xfer_partial.  */

LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Honor the global "may-write-memory" setting before any write.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, 16 per line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  /* At verbosity 1, only the first line is shown.  */
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1769
1770 /* Read LEN bytes of target memory at address MEMADDR, placing the
1771 results in GDB's memory at MYADDR. Returns either 0 for success or
1772 a target_xfer_error value if any error occurs.
1773
1774 If an error occurs, no guarantee is made about the contents of the data at
1775 MYADDR. In particular, the caller should not depend upon partial reads
1776 filling the buffer with good data. There is no way for the caller to know
1777 how much good data might have been transfered anyway. Callers that can
1778 deal with partial reads should call target_read (which will retry until
1779 it makes no progress, and then return how much was transferred). */
1780
1781 int
1782 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1783 {
1784 /* Dispatch to the topmost target, not the flattened current_target.
1785 Memory accesses check target->to_has_(all_)memory, and the
1786 flattened target doesn't inherit those. */
1787 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1788 myaddr, memaddr, len) == len)
1789 return 0;
1790 else
1791 return TARGET_XFER_E_IO;
1792 }
1793
1794 /* Like target_read_memory, but specify explicitly that this is a read
1795 from the target's raw memory. That is, this read bypasses the
1796 dcache, breakpoint shadowing, etc. */
1797
1798 int
1799 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1800 {
1801 /* See comment in target_read_memory about why the request starts at
1802 current_target.beneath. */
1803 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1804 myaddr, memaddr, len) == len)
1805 return 0;
1806 else
1807 return TARGET_XFER_E_IO;
1808 }
1809
1810 /* Like target_read_memory, but specify explicitly that this is a read from
1811 the target's stack. This may trigger different cache behavior. */
1812
1813 int
1814 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1815 {
1816 /* See comment in target_read_memory about why the request starts at
1817 current_target.beneath. */
1818 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1819 myaddr, memaddr, len) == len)
1820 return 0;
1821 else
1822 return TARGET_XFER_E_IO;
1823 }
1824
1825 /* Like target_read_memory, but specify explicitly that this is a read from
1826 the target's code. This may trigger different cache behavior. */
1827
1828 int
1829 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1830 {
1831 /* See comment in target_read_memory about why the request starts at
1832 current_target.beneath. */
1833 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1834 myaddr, memaddr, len) == len)
1835 return 0;
1836 else
1837 return TARGET_XFER_E_IO;
1838 }
1839
1840 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1841 Returns either 0 for success or a target_xfer_error value if any
1842 error occurs. If an error occurs, no guarantee is made about how
1843 much data got written. Callers that can deal with partial writes
1844 should call target_write. */
1845
1846 int
1847 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1848 {
1849 /* See comment in target_read_memory about why the request starts at
1850 current_target.beneath. */
1851 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1852 myaddr, memaddr, len) == len)
1853 return 0;
1854 else
1855 return TARGET_XFER_E_IO;
1856 }
1857
1858 /* Write LEN bytes from MYADDR to target raw memory at address
1859 MEMADDR. Returns either 0 for success or a target_xfer_error value
1860 if any error occurs. If an error occurs, no guarantee is made
1861 about how much data got written. Callers that can deal with
1862 partial writes should call target_write. */
1863
1864 int
1865 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1866 {
1867 /* See comment in target_read_memory about why the request starts at
1868 current_target.beneath. */
1869 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1870 myaddr, memaddr, len) == len)
1871 return 0;
1872 else
1873 return TARGET_XFER_E_IO;
1874 }
1875
/* Fetch the target's memory map.  Returns a sorted vector of regions
   with "mem" command numbers assigned, or NULL if no target supplies
   a map or the map is invalid (overlapping regions).  The caller owns
   the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Delegate to the first target in the stack that can supply a
     memory map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by start address so that overlapping regions end up
     adjacent.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1923
1924 void
1925 target_flash_erase (ULONGEST address, LONGEST length)
1926 {
1927 struct target_ops *t;
1928
1929 for (t = current_target.beneath; t != NULL; t = t->beneath)
1930 if (t->to_flash_erase != NULL)
1931 {
1932 if (targetdebug)
1933 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1934 hex_string (address), phex (length, 0));
1935 t->to_flash_erase (t, address, length);
1936 return;
1937 }
1938
1939 tcomplain ();
1940 }
1941
1942 void
1943 target_flash_done (void)
1944 {
1945 struct target_ops *t;
1946
1947 for (t = current_target.beneath; t != NULL; t = t->beneath)
1948 if (t->to_flash_done != NULL)
1949 {
1950 if (targetdebug)
1951 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1952 t->to_flash_done (t);
1953 return;
1954 }
1955
1956 tcomplain ();
1957 }
1958
/* Implement the "show trust-readonly-sections" command.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1967
/* More generic transfers.  */

/* Default to_xfer_partial implementation: for memory requests, fall
   back to the target's deprecated_xfer_memory method if it has one;
   otherwise delegate down the stack.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
	{
	  /* deprecated_xfer_memory takes a non-const buffer, so work
	     on a scratch copy of WRITEBUF.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
2011
2012 /* The xfer_partial handler for the topmost target. Unlike the default,
2013 it does not need to handle memory specially; it just passes all
2014 requests down the stack. */
2015
2016 static LONGEST
2017 current_xfer_partial (struct target_ops *ops, enum target_object object,
2018 const char *annex, gdb_byte *readbuf,
2019 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2020 {
2021 if (ops->beneath != NULL)
2022 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2023 readbuf, writebuf, offset, len);
2024 else
2025 return -1;
2026 }
2027
/* Target vector read/write partial wrapper functions.  */

/* Read up to LEN bytes of OBJECT/ANNEX from OPS into BUF, starting at
   OFFSET; a read-only wrapper around target_xfer_partial.  */

static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
2038
/* Write up to LEN bytes of OBJECT/ANNEX from BUF to OPS, starting at
   OFFSET; a write-only wrapper around target_xfer_partial.  */

static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
2047
2048 /* Wrappers to perform the full transfer. */
2049
2050 /* For docs on target_read see target.h. */
2051
2052 LONGEST
2053 target_read (struct target_ops *ops,
2054 enum target_object object,
2055 const char *annex, gdb_byte *buf,
2056 ULONGEST offset, LONGEST len)
2057 {
2058 LONGEST xfered = 0;
2059
2060 while (xfered < len)
2061 {
2062 LONGEST xfer = target_read_partial (ops, object, annex,
2063 (gdb_byte *) buf + xfered,
2064 offset + xfered, len - xfered);
2065
2066 /* Call an observer, notifying them of the xfer progress? */
2067 if (xfer == 0)
2068 return xfered;
2069 if (xfer < 0)
2070 return -1;
2071 xfered += xfer;
2072 QUIT;
2073 }
2074 return len;
2075 }
2076
2077 /* Assuming that the entire [begin, end) range of memory cannot be
2078 read, try to read whatever subrange is possible to read.
2079
2080 The function returns, in RESULT, either zero or one memory block.
2081 If there's a readable subrange at the beginning, it is completely
2082 read and returned. Any further readable subrange will not be read.
2083 Otherwise, if there's a readable subrange at the end, it will be
2084 completely read and returned. Any readable subranges before it
2085 (obviously, not starting at the beginning), will be ignored. In
2086 other cases -- either no readable subrange, or readable subrange(s)
2087 that is neither at the beginning, or end, nothing is returned.
2088
2089 The purpose of this function is to handle a read across a boundary
2090 of accessible memory in a case when memory map is not available.
2091 The above restrictions are fine for this case, but will give
2092 incorrect results if the memory is 'patchy'. However, supporting
2093 'patchy' memory would require trying to read every single byte,
2094 and it seems unacceptable solution. Explicit memory map is
2095 recommended for this case -- and target_read_memory_robust will
2096 take care of reading multiple ranges then. */
2097
static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either the first or the last byte is readable, and give
     up if not.  This heuristic is meant to permit reading accessible
     memory at the boundary of an accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1) == 1)
    {
      /* First byte is readable: bisect forward for the end of the
         readable subrange.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end-begin) - 1, end - 1, 1) == 1)
    {
      /* Last byte is readable: bisect backward for the start of the
         readable subrange.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; per the heuristic above,
         give up.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) range was
     previously found to be not readable as a whole.

     Note the loop condition -- if the range has 1 byte, we can't divide
     the range so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only
             tries to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read; hand BUF over
         to the result block directly.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read; copy it out of the
         tail of BUF so the result block owns exactly its own bytes.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2209
2210 void
2211 free_memory_read_result_vector (void *x)
2212 {
2213 VEC(memory_read_result_s) *v = x;
2214 memory_read_result_s *current;
2215 int ix;
2216
2217 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2218 {
2219 xfree (current->data);
2220 }
2221 VEC_free (memory_read_result_s, v);
2222 }
2223
/* Read LEN bytes of target memory at OFFSET using OPS, splitting the
   request along memory-region boundaries, and return a vector of the
   blocks that could be read (unreadable stretches are simply absent
   from the result).  The caller owns the vector and each block's
   data; see free_memory_read_result_vector.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
        rlen = len - xfered;
      else
        /* NOTE(review): this looks like it should be
           region->hi - (offset + xfered); as written, RLEN can extend
           past the region's end once XFERED > 0 -- confirm.  */
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              struct memory_read_result r;
              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
2282
2283
2284 /* An alternative to target_write with progress callbacks. */
2285
2286 LONGEST
2287 target_write_with_progress (struct target_ops *ops,
2288 enum target_object object,
2289 const char *annex, const gdb_byte *buf,
2290 ULONGEST offset, LONGEST len,
2291 void (*progress) (ULONGEST, void *), void *baton)
2292 {
2293 LONGEST xfered = 0;
2294
2295 /* Give the progress callback a chance to set up. */
2296 if (progress)
2297 (*progress) (0, baton);
2298
2299 while (xfered < len)
2300 {
2301 LONGEST xfer = target_write_partial (ops, object, annex,
2302 (gdb_byte *) buf + xfered,
2303 offset + xfered, len - xfered);
2304
2305 if (xfer == 0)
2306 return xfered;
2307 if (xfer < 0)
2308 return -1;
2309
2310 if (progress)
2311 (*progress) (xfer, baton);
2312
2313 xfered += xfer;
2314 QUIT;
2315 }
2316 return len;
2317 }
2318
2319 /* For docs on target_write see target.h. */
2320
2321 LONGEST
2322 target_write (struct target_ops *ops,
2323 enum target_object object,
2324 const char *annex, const gdb_byte *buf,
2325 ULONGEST offset, LONGEST len)
2326 {
2327 return target_write_with_progress (ops, object, annex, buf, offset, len,
2328 NULL, NULL);
2329 }
2330
2331 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2332 the size of the transferred data. PADDING additional bytes are
2333 available in *BUF_P. This is a helper function for
2334 target_read_alloc; see the declaration of that function for more
2335 information. */
2336
2337 static LONGEST
2338 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2339 const char *annex, gdb_byte **buf_p, int padding)
2340 {
2341 size_t buf_alloc, buf_pos;
2342 gdb_byte *buf;
2343 LONGEST n;
2344
2345 /* This function does not have a length parameter; it reads the
2346 entire OBJECT). Also, it doesn't support objects fetched partly
2347 from one target and partly from another (in a different stratum,
2348 e.g. a core file and an executable). Both reasons make it
2349 unsuitable for reading memory. */
2350 gdb_assert (object != TARGET_OBJECT_MEMORY);
2351
2352 /* Start by reading up to 4K at a time. The target will throttle
2353 this number down if necessary. */
2354 buf_alloc = 4096;
2355 buf = xmalloc (buf_alloc);
2356 buf_pos = 0;
2357 while (1)
2358 {
2359 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2360 buf_pos, buf_alloc - buf_pos - padding);
2361 if (n < 0)
2362 {
2363 /* An error occurred. */
2364 xfree (buf);
2365 return -1;
2366 }
2367 else if (n == 0)
2368 {
2369 /* Read all there was. */
2370 if (buf_pos == 0)
2371 xfree (buf);
2372 else
2373 *buf_p = buf;
2374 return buf_pos;
2375 }
2376
2377 buf_pos += n;
2378
2379 /* If the buffer is filling up, expand it. */
2380 if (buf_alloc < buf_pos * 2)
2381 {
2382 buf_alloc *= 2;
2383 buf = xrealloc (buf, buf_alloc);
2384 }
2385
2386 QUIT;
2387 }
2388 }
2389
2390 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2391 the size of the transferred data. See the declaration in "target.h"
2392 function for more information about the return value. */
2393
2394 LONGEST
2395 target_read_alloc (struct target_ops *ops, enum target_object object,
2396 const char *annex, gdb_byte **buf_p)
2397 {
2398 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2399 }
2400
2401 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2402 returned as a string, allocated using xmalloc. If an error occurs
2403 or the transfer is unsupported, NULL is returned. Empty objects
2404 are returned as allocated but empty strings. A warning is issued
2405 if the result contains any embedded NUL bytes. */
2406
2407 char *
2408 target_read_stralloc (struct target_ops *ops, enum target_object object,
2409 const char *annex)
2410 {
2411 gdb_byte *buffer;
2412 char *bufstr;
2413 LONGEST i, transferred;
2414
2415 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2416 bufstr = (char *) buffer;
2417
2418 if (transferred < 0)
2419 return NULL;
2420
2421 if (transferred == 0)
2422 return xstrdup ("");
2423
2424 bufstr[transferred] = 0;
2425
2426 /* Check for embedded NUL bytes; but allow trailing NULs. */
2427 for (i = strlen (bufstr); i < transferred; i++)
2428 if (bufstr[i] != 0)
2429 {
2430 warning (_("target object %d, annex %s, "
2431 "contained unexpected null characters"),
2432 (int) object, annex ? annex : "(none)");
2433 break;
2434 }
2435
2436 return bufstr;
2437 }
2438
2439 /* Memory transfer methods. */
2440
2441 void
2442 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2443 LONGEST len)
2444 {
2445 /* This method is used to read from an alternate, non-current
2446 target. This read must bypass the overlay support (as symbols
2447 don't match this target), and GDB's internal cache (wrong cache
2448 for this target). */
2449 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2450 != len)
2451 memory_error (TARGET_XFER_E_IO, addr);
2452 }
2453
2454 ULONGEST
2455 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2456 int len, enum bfd_endian byte_order)
2457 {
2458 gdb_byte buf[sizeof (ULONGEST)];
2459
2460 gdb_assert (len <= sizeof (buf));
2461 get_target_memory (ops, addr, buf, len);
2462 return extract_unsigned_integer (buf, len, byte_order);
2463 }
2464
2465 int
2466 target_insert_breakpoint (struct gdbarch *gdbarch,
2467 struct bp_target_info *bp_tgt)
2468 {
2469 if (!may_insert_breakpoints)
2470 {
2471 warning (_("May not insert breakpoints"));
2472 return 1;
2473 }
2474
2475 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2476 }
2477
2478 int
2479 target_remove_breakpoint (struct gdbarch *gdbarch,
2480 struct bp_target_info *bp_tgt)
2481 {
2482 /* This is kind of a weird case to handle, but the permission might
2483 have been changed after breakpoints were inserted - in which case
2484 we should just take the user literally and assume that any
2485 breakpoints should be left in place. */
2486 if (!may_insert_breakpoints)
2487 {
2488 warning (_("May not remove breakpoints"));
2489 return 1;
2490 }
2491
2492 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2493 }
2494
2495 static void
2496 target_info (char *args, int from_tty)
2497 {
2498 struct target_ops *t;
2499 int has_all_mem = 0;
2500
2501 if (symfile_objfile != NULL)
2502 printf_unfiltered (_("Symbols from \"%s\".\n"),
2503 objfile_name (symfile_objfile));
2504
2505 for (t = target_stack; t != NULL; t = t->beneath)
2506 {
2507 if (!(*t->to_has_memory) (t))
2508 continue;
2509
2510 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2511 continue;
2512 if (has_all_mem)
2513 printf_unfiltered (_("\tWhile running this, "
2514 "GDB does not access memory from...\n"));
2515 printf_unfiltered ("%s:\n", t->to_longname);
2516 (t->to_files_info) (t);
2517 has_all_mem = (*t->to_has_all_memory) (t);
2518 }
2519 }
2520
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  Only reset that state when the
     shared library list is per-inferior.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* NOTE(review): presumably the in-process agent capabilities must
     be re-probed for the new inferior -- confirm.  */
  agent_capability_invalidate ();
}
2561
2562 /* Callback for iterate_over_inferiors. Gets rid of the given
2563 inferior. */
2564
2565 static int
2566 dispose_inferior (struct inferior *inf, void *args)
2567 {
2568 struct thread_info *thread;
2569
2570 thread = any_thread_of_process (inf->pid);
2571 if (thread)
2572 {
2573 switch_to_thread (thread->ptid);
2574
2575 /* Core inferiors actually should be detached, not killed. */
2576 if (target_has_execution)
2577 target_kill ();
2578 else
2579 target_detach (NULL, 0);
2580 }
2581
2582 return 0;
2583 }
2584
2585 /* This is to be called by the open routine before it does
2586 anything. */
2587
2588 void
2589 target_preopen (int from_tty)
2590 {
2591 dont_repeat ();
2592
2593 if (have_inferiors ())
2594 {
2595 if (!from_tty
2596 || !have_live_inferiors ()
2597 || query (_("A program is being debugged already. Kill it? ")))
2598 iterate_over_inferiors (dispose_inferior, NULL);
2599 else
2600 error (_("Program not killed."));
2601 }
2602
2603 /* Calling target_kill may remove the target from the stack. But if
2604 it doesn't (which seems like a win for UDI), remove it now. */
2605 /* Leave the exec target, though. The user may be switching from a
2606 live process to a core of the same program. */
2607 pop_all_targets_above (file_stratum);
2608
2609 target_pre_inferior (from_tty);
2610 }
2611
2612 /* Detach a target after doing deferred register stores. */
2613
2614 void
2615 target_detach (const char *args, int from_tty)
2616 {
2617 struct target_ops* t;
2618
2619 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2620 /* Don't remove global breakpoints here. They're removed on
2621 disconnection from the target. */
2622 ;
2623 else
2624 /* If we're in breakpoints-always-inserted mode, have to remove
2625 them before detaching. */
2626 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2627
2628 prepare_for_detach ();
2629
2630 for (t = current_target.beneath; t != NULL; t = t->beneath)
2631 {
2632 if (t->to_detach != NULL)
2633 {
2634 t->to_detach (t, args, from_tty);
2635 if (targetdebug)
2636 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2637 args, from_tty);
2638 return;
2639 }
2640 }
2641
2642 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2643 }
2644
2645 void
2646 target_disconnect (char *args, int from_tty)
2647 {
2648 struct target_ops *t;
2649
2650 /* If we're in breakpoints-always-inserted mode or if breakpoints
2651 are global across processes, we have to remove them before
2652 disconnecting. */
2653 remove_breakpoints ();
2654
2655 for (t = current_target.beneath; t != NULL; t = t->beneath)
2656 if (t->to_disconnect != NULL)
2657 {
2658 if (targetdebug)
2659 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2660 args, from_tty);
2661 t->to_disconnect (t, args, from_tty);
2662 return;
2663 }
2664
2665 tcomplain ();
2666 }
2667
/* Wait for an event from the inferior matching PTID.  The event's
   details are stored in *STATUS and the ptid of the thread the event
   applies to is returned.  OPTIONS is a bitmask of target wait flags.
   Delegates to the first target on the stack implementing to_wait;
   calls noprocess if none does.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_wait != NULL)
	{
	  ptid_t retval = (*t->to_wait) (t, ptid, status, options);

	  if (targetdebug)
	    {
	      char *status_string;
	      char *options_string;

	      /* Both strings are heap-allocated and must be freed
		 after printing.  */
	      status_string = target_waitstatus_to_string (status);
	      options_string = target_options_to_string (options);
	      fprintf_unfiltered (gdb_stdlog,
				  "target_wait (%d, status, options={%s})"
				  " = %d, %s\n",
				  ptid_get_pid (ptid), options_string,
				  ptid_get_pid (retval), status_string);
	      xfree (status_string);
	      xfree (options_string);
	    }

	  return retval;
	}
    }

  noprocess ();
}
2701
2702 char *
2703 target_pid_to_str (ptid_t ptid)
2704 {
2705 struct target_ops *t;
2706
2707 for (t = current_target.beneath; t != NULL; t = t->beneath)
2708 {
2709 if (t->to_pid_to_str != NULL)
2710 return (*t->to_pid_to_str) (t, ptid);
2711 }
2712
2713 return normal_pid_to_str (ptid);
2714 }
2715
2716 char *
2717 target_thread_name (struct thread_info *info)
2718 {
2719 struct target_ops *t;
2720
2721 for (t = current_target.beneath; t != NULL; t = t->beneath)
2722 {
2723 if (t->to_thread_name != NULL)
2724 return (*t->to_thread_name) (info);
2725 }
2726
2727 return NULL;
2728 }
2729
2730 void
2731 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2732 {
2733 struct target_ops *t;
2734
2735 target_dcache_invalidate ();
2736
2737 for (t = current_target.beneath; t != NULL; t = t->beneath)
2738 {
2739 if (t->to_resume != NULL)
2740 {
2741 t->to_resume (t, ptid, step, signal);
2742 if (targetdebug)
2743 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2744 ptid_get_pid (ptid),
2745 step ? "step" : "continue",
2746 gdb_signal_to_name (signal));
2747
2748 registers_changed_ptid (ptid);
2749 set_executing (ptid, 1);
2750 set_running (ptid, 1);
2751 clear_inline_frame_state (ptid);
2752 return;
2753 }
2754 }
2755
2756 noprocess ();
2757 }
2758
2759 void
2760 target_pass_signals (int numsigs, unsigned char *pass_signals)
2761 {
2762 struct target_ops *t;
2763
2764 for (t = current_target.beneath; t != NULL; t = t->beneath)
2765 {
2766 if (t->to_pass_signals != NULL)
2767 {
2768 if (targetdebug)
2769 {
2770 int i;
2771
2772 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2773 numsigs);
2774
2775 for (i = 0; i < numsigs; i++)
2776 if (pass_signals[i])
2777 fprintf_unfiltered (gdb_stdlog, " %s",
2778 gdb_signal_to_name (i));
2779
2780 fprintf_unfiltered (gdb_stdlog, " })\n");
2781 }
2782
2783 (*t->to_pass_signals) (numsigs, pass_signals);
2784 return;
2785 }
2786 }
2787 }
2788
2789 void
2790 target_program_signals (int numsigs, unsigned char *program_signals)
2791 {
2792 struct target_ops *t;
2793
2794 for (t = current_target.beneath; t != NULL; t = t->beneath)
2795 {
2796 if (t->to_program_signals != NULL)
2797 {
2798 if (targetdebug)
2799 {
2800 int i;
2801
2802 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2803 numsigs);
2804
2805 for (i = 0; i < numsigs; i++)
2806 if (program_signals[i])
2807 fprintf_unfiltered (gdb_stdlog, " %s",
2808 gdb_signal_to_name (i));
2809
2810 fprintf_unfiltered (gdb_stdlog, " })\n");
2811 }
2812
2813 (*t->to_program_signals) (numsigs, program_signals);
2814 return;
2815 }
2816 }
2817 }
2818
2819 /* Look through the list of possible targets for a target that can
2820 follow forks. */
2821
2822 int
2823 target_follow_fork (int follow_child, int detach_fork)
2824 {
2825 struct target_ops *t;
2826
2827 for (t = current_target.beneath; t != NULL; t = t->beneath)
2828 {
2829 if (t->to_follow_fork != NULL)
2830 {
2831 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2832
2833 if (targetdebug)
2834 fprintf_unfiltered (gdb_stdlog,
2835 "target_follow_fork (%d, %d) = %d\n",
2836 follow_child, detach_fork, retval);
2837 return retval;
2838 }
2839 }
2840
2841 /* Some target returned a fork event, but did not know how to follow it. */
2842 internal_error (__FILE__, __LINE__,
2843 _("could not find a target to follow fork"));
2844 }
2845
2846 void
2847 target_mourn_inferior (void)
2848 {
2849 struct target_ops *t;
2850
2851 for (t = current_target.beneath; t != NULL; t = t->beneath)
2852 {
2853 if (t->to_mourn_inferior != NULL)
2854 {
2855 t->to_mourn_inferior (t);
2856 if (targetdebug)
2857 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2858
2859 /* We no longer need to keep handles on any of the object files.
2860 Make sure to release them to avoid unnecessarily locking any
2861 of them while we're not actually debugging. */
2862 bfd_cache_close_all ();
2863
2864 return;
2865 }
2866 }
2867
2868 internal_error (__FILE__, __LINE__,
2869 _("could not find a target to follow mourn inferior"));
2870 }
2871
2872 /* Look for a target which can describe architectural features, starting
2873 from TARGET. If we find one, return its description. */
2874
2875 const struct target_desc *
2876 target_read_description (struct target_ops *target)
2877 {
2878 struct target_ops *t;
2879
2880 for (t = target; t != NULL; t = t->beneath)
2881 if (t->to_read_description != NULL)
2882 {
2883 const struct target_desc *tdesc;
2884
2885 tdesc = t->to_read_description (t);
2886 if (tdesc)
2887 return tdesc;
2888 }
2889
2890 return NULL;
2891 }
2892
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Keep PATTERN_LEN - 1 extra bytes so a match straddling a chunk
     boundary is still found within a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc with an explicit check, rather than
     xmalloc -- presumably so a huge request fails as a search error
     instead of aborting GDB; confirm.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
                 "memory at %s, halting search."),
               pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
        {
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);
          return 1;
        }

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
        {
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
          int nr_to_read;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
            {
              /* NOTE(review): plongest here vs. pulongest in the
                 similar warning above -- confirm which is intended.  */
              warning (_("Unable to access %s bytes of target "
                         "memory at %s, halting search."),
                       plongest (nr_to_read),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);
              return -1;
            }

          start_addr += chunk_size;
        }
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3000
3001 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3002 sequence of bytes in PATTERN with length PATTERN_LEN.
3003
3004 The result is 1 if found, 0 if not found, and -1 if there was an error
3005 requiring halting of the search (e.g. memory read error).
3006 If the pattern is found the address is recorded in FOUND_ADDRP. */
3007
3008 int
3009 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3010 const gdb_byte *pattern, ULONGEST pattern_len,
3011 CORE_ADDR *found_addrp)
3012 {
3013 struct target_ops *t;
3014 int found;
3015
3016 /* We don't use INHERIT to set current_target.to_search_memory,
3017 so we have to scan the target stack and handle targetdebug
3018 ourselves. */
3019
3020 if (targetdebug)
3021 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3022 hex_string (start_addr));
3023
3024 for (t = current_target.beneath; t != NULL; t = t->beneath)
3025 if (t->to_search_memory != NULL)
3026 break;
3027
3028 if (t != NULL)
3029 {
3030 found = t->to_search_memory (t, start_addr, search_space_len,
3031 pattern, pattern_len, found_addrp);
3032 }
3033 else
3034 {
3035 /* If a special version of to_search_memory isn't available, use the
3036 simple version. */
3037 found = simple_search_memory (current_target.beneath,
3038 start_addr, search_space_len,
3039 pattern, pattern_len, found_addrp);
3040 }
3041
3042 if (targetdebug)
3043 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3044
3045 return found;
3046 }
3047
3048 /* Look through the currently pushed targets. If none of them will
3049 be able to restart the currently running process, issue an error
3050 message. */
3051
3052 void
3053 target_require_runnable (void)
3054 {
3055 struct target_ops *t;
3056
3057 for (t = target_stack; t != NULL; t = t->beneath)
3058 {
3059 /* If this target knows how to create a new program, then
3060 assume we will still be able to after killing the current
3061 one. Either killing and mourning will not pop T, or else
3062 find_default_run_target will find it again. */
3063 if (t->to_create_inferior != NULL)
3064 return;
3065
3066 /* Do not worry about thread_stratum targets that can not
3067 create inferiors. Assume they will be pushed again if
3068 necessary, and continue to the process_stratum. */
3069 if (t->to_stratum == thread_stratum
3070 || t->to_stratum == arch_stratum)
3071 continue;
3072
3073 error (_("The \"%s\" target does not support \"run\". "
3074 "Try \"help target\" or \"continue\"."),
3075 t->to_shortname);
3076 }
3077
3078 /* This function is only called if the target is running. In that
3079 case there should have been a process_stratum target and it
3080 should either know how to create inferiors, or not... */
3081 internal_error (__FILE__, __LINE__, _("No targets found"));
3082 }
3083
3084 /* Look through the list of possible targets for a target that can
3085 execute a run or attach command without any other data. This is
3086 used to locate the default process stratum.
3087
3088 If DO_MESG is not NULL, the result is always valid (error() is
3089 called for errors); else, return NULL on error. */
3090
3091 static struct target_ops *
3092 find_default_run_target (char *do_mesg)
3093 {
3094 struct target_ops **t;
3095 struct target_ops *runable = NULL;
3096 int count;
3097
3098 count = 0;
3099
3100 for (t = target_structs; t < target_structs + target_struct_size;
3101 ++t)
3102 {
3103 if ((*t)->to_can_run && target_can_run (*t))
3104 {
3105 runable = *t;
3106 ++count;
3107 }
3108 }
3109
3110 if (count != 1)
3111 {
3112 if (do_mesg)
3113 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3114 else
3115 return NULL;
3116 }
3117
3118 return runable;
3119 }
3120
3121 void
3122 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3123 {
3124 struct target_ops *t;
3125
3126 t = find_default_run_target ("attach");
3127 (t->to_attach) (t, args, from_tty);
3128 return;
3129 }
3130
3131 void
3132 find_default_create_inferior (struct target_ops *ops,
3133 char *exec_file, char *allargs, char **env,
3134 int from_tty)
3135 {
3136 struct target_ops *t;
3137
3138 t = find_default_run_target ("run");
3139 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3140 return;
3141 }
3142
3143 static int
3144 find_default_can_async_p (void)
3145 {
3146 struct target_ops *t;
3147
3148 /* This may be called before the target is pushed on the stack;
3149 look for the default process stratum. If there's none, gdb isn't
3150 configured with a native debugger, and target remote isn't
3151 connected yet. */
3152 t = find_default_run_target (NULL);
3153 if (t && t->to_can_async_p)
3154 return (t->to_can_async_p) ();
3155 return 0;
3156 }
3157
3158 static int
3159 find_default_is_async_p (void)
3160 {
3161 struct target_ops *t;
3162
3163 /* This may be called before the target is pushed on the stack;
3164 look for the default process stratum. If there's none, gdb isn't
3165 configured with a native debugger, and target remote isn't
3166 connected yet. */
3167 t = find_default_run_target (NULL);
3168 if (t && t->to_is_async_p)
3169 return (t->to_is_async_p) ();
3170 return 0;
3171 }
3172
3173 static int
3174 find_default_supports_non_stop (void)
3175 {
3176 struct target_ops *t;
3177
3178 t = find_default_run_target (NULL);
3179 if (t && t->to_supports_non_stop)
3180 return (t->to_supports_non_stop) ();
3181 return 0;
3182 }
3183
3184 int
3185 target_supports_non_stop (void)
3186 {
3187 struct target_ops *t;
3188
3189 for (t = &current_target; t != NULL; t = t->beneath)
3190 if (t->to_supports_non_stop)
3191 return t->to_supports_non_stop ();
3192
3193 return 0;
3194 }
3195
3196 /* Implement the "info proc" command. */
3197
3198 int
3199 target_info_proc (char *args, enum info_proc_what what)
3200 {
3201 struct target_ops *t;
3202
3203 /* If we're already connected to something that can get us OS
3204 related data, use it. Otherwise, try using the native
3205 target. */
3206 if (current_target.to_stratum >= process_stratum)
3207 t = current_target.beneath;
3208 else
3209 t = find_default_run_target (NULL);
3210
3211 for (; t != NULL; t = t->beneath)
3212 {
3213 if (t->to_info_proc != NULL)
3214 {
3215 t->to_info_proc (t, args, what);
3216
3217 if (targetdebug)
3218 fprintf_unfiltered (gdb_stdlog,
3219 "target_info_proc (\"%s\", %d)\n", args, what);
3220
3221 return 1;
3222 }
3223 }
3224
3225 return 0;
3226 }
3227
3228 static int
3229 find_default_supports_disable_randomization (void)
3230 {
3231 struct target_ops *t;
3232
3233 t = find_default_run_target (NULL);
3234 if (t && t->to_supports_disable_randomization)
3235 return (t->to_supports_disable_randomization) ();
3236 return 0;
3237 }
3238
3239 int
3240 target_supports_disable_randomization (void)
3241 {
3242 struct target_ops *t;
3243
3244 for (t = &current_target; t != NULL; t = t->beneath)
3245 if (t->to_supports_disable_randomization)
3246 return t->to_supports_disable_randomization ();
3247
3248 return 0;
3249 }
3250
3251 char *
3252 target_get_osdata (const char *type)
3253 {
3254 struct target_ops *t;
3255
3256 /* If we're already connected to something that can get us OS
3257 related data, use it. Otherwise, try using the native
3258 target. */
3259 if (current_target.to_stratum >= process_stratum)
3260 t = current_target.beneath;
3261 else
3262 t = find_default_run_target ("get OS data");
3263
3264 if (!t)
3265 return NULL;
3266
3267 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3268 }
3269
/* Determine the current address space of thread PTID.  Never returns
   NULL: falls back to the inferior's main address space, and raises
   an internal error if even that is unavailable.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_thread_address_space.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* A target that implements the method must produce an
	     address space.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3306
3307
3308 /* Target file operations. */
3309
3310 static struct target_ops *
3311 default_fileio_target (void)
3312 {
3313 /* If we're already connected to something that can perform
3314 file I/O, use it. Otherwise, try using the native target. */
3315 if (current_target.to_stratum >= process_stratum)
3316 return current_target.beneath;
3317 else
3318 return find_default_run_target ("file I/O");
3319 }
3320
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_open.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3348
/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_pwrite.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3378
/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_pread.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3408
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_close.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3433
/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_unlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3458
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  The caller owns the returned
   string and must xfree it.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements
     to_fileio_readlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements target-side file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3485
/* Cleanup callback that closes the target file descriptor pointed to
   by OPAQUE, discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int ignored_errno;

  target_fileio_close (*(int *) opaque, &ignored_errno);
}
3494
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On error, returns -1 and leaves *BUF_P untouched.  When the file is
   empty, *BUF_P is also left untouched (the scratch buffer is freed)
   and 0 is returned.  Otherwise the caller owns *BUF_P and must
   xfree it.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure the descriptor is closed on any exit path, including via
     QUIT below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always leave PADDING bytes of headroom at the end of the
	 buffer for the caller (e.g. a terminating NUL).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3558
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding is needed for a plain binary read.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3568
3569 /* Read target file FILENAME. The result is NUL-terminated and
3570 returned as a string, allocated using xmalloc. If an error occurs
3571 or the transfer is unsupported, NULL is returned. Empty objects
3572 are returned as allocated but empty strings. A warning is issued
3573 if the result contains any embedded NUL bytes. */
3574
3575 char *
3576 target_fileio_read_stralloc (const char *filename)
3577 {
3578 gdb_byte *buffer;
3579 char *bufstr;
3580 LONGEST i, transferred;
3581
3582 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3583 bufstr = (char *) buffer;
3584
3585 if (transferred < 0)
3586 return NULL;
3587
3588 if (transferred == 0)
3589 return xstrdup ("");
3590
3591 bufstr[transferred] = 0;
3592
3593 /* Check for embedded NUL bytes; but allow trailing NULs. */
3594 for (i = strlen (bufstr); i < transferred; i++)
3595 if (bufstr[i] != 0)
3596 {
3597 warning (_("target file %s "
3598 "contained unexpected null characters"),
3599 filename);
3600 break;
3601 }
3602
3603 return bufstr;
3604 }
3605
3606
/* Default to_region_ok_for_hw_watchpoint: accept a region no larger
   than a pointer on the target architecture.  */
static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3612
3613 static int
3614 default_watchpoint_addr_within_range (struct target_ops *target,
3615 CORE_ADDR addr,
3616 CORE_ADDR start, int length)
3617 {
3618 return addr >= start && addr < start + length;
3619 }
3620
/* Default to_thread_architecture: every thread uses the target's
   global architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3626
/* Stub usable (with a cast) for any target-vector slot that should
   always answer 0 / false.  */
static int
return_zero (void)
{
  return 0;
}
3632
/* Stub usable (with a cast) for any target-vector slot that should
   always answer 1 / true.  */
static int
return_one (void)
{
  return 1;
}
3638
/* Stub usable (with a cast) for any target-vector slot that should
   always answer -1.  */
static int
return_minus_one (void)
{
  return -1;
}
3644
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Trivial accessor; kept as a function for use as a callback and
     for symmetry with the rest of the target API.  */
  return t->beneath;
}
3654
3655 \f
/* The inferior process has died.  Long live the inferior!  Shared
   teardown used by most targets' to_mourn_inferior: clears
   inferior_ptid, tears down breakpoints and threads, and resets
   per-run caches.  The statement order below is significant.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first, so nothing below operates on
     the dead process by accident.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the interface (e.g. Insight) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3690 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  The result is only valid until the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3702
/* to_pid_to_str implementation for the dummy target; just the
   generic "process N" form.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3708
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;	/* Not reached; error does not return.  */
}
3716
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;	/* Not reached; error does not return.  */
}
3724
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;	/* Not reached; tcomplain does not return.  */
}
3732
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3739
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the stack and is
   always present; slots not filled here fall back to the defaults
   installed elsewhere.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Attach/run requests fall through to the default run target.  */
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* These raise errors if actually invoked with no real target.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* With no inferior, the dummy target has nothing: the casts adapt
     the generic return_zero stub to each slot's signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3776 \f
/* Debug-target wrapper: forward to_open to the real target, then log
   the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3784
/* Close TARG, which must not be on the target stack.  Prefers the
   newer to_xclose (which receives the target) over to_close.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close ();

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3798
/* Attach to the process specified by ARGS.  Delegates to the first
   target on the stack implementing to_attach; it is an internal
   error for no target to implement it (the dummy target always
   provides find_default_attach).  */
void
target_attach (char *args, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_attach != NULL)
	{
	  t->to_attach (t, args, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
				args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to attach"));
}
3819
3820 int
3821 target_thread_alive (ptid_t ptid)
3822 {
3823 struct target_ops *t;
3824
3825 for (t = current_target.beneath; t != NULL; t = t->beneath)
3826 {
3827 if (t->to_thread_alive != NULL)
3828 {
3829 int retval;
3830
3831 retval = t->to_thread_alive (t, ptid);
3832 if (targetdebug)
3833 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3834 ptid_get_pid (ptid), retval);
3835
3836 return retval;
3837 }
3838 }
3839
3840 return 0;
3841 }
3842
3843 void
3844 target_find_new_threads (void)
3845 {
3846 struct target_ops *t;
3847
3848 for (t = current_target.beneath; t != NULL; t = t->beneath)
3849 {
3850 if (t->to_find_new_threads != NULL)
3851 {
3852 t->to_find_new_threads (t);
3853 if (targetdebug)
3854 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3855
3856 return;
3857 }
3858 }
3859 }
3860
3861 void
3862 target_stop (ptid_t ptid)
3863 {
3864 if (!may_stop)
3865 {
3866 warning (_("May not interrupt or stop the target, ignoring attempt"));
3867 return;
3868 }
3869
3870 (*current_target.to_stop) (ptid);
3871 }
3872
/* Debug-target wrapper: forward to_post_attach to the real target,
   then log the call.  */
static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3880
3881 /* Concatenate ELEM to LIST, a comma separate list, and return the
3882 result. The LIST incoming argument is released. */
3883
3884 static char *
3885 str_comma_list_concat_elem (char *list, const char *elem)
3886 {
3887 if (list == NULL)
3888 return xstrdup (elem);
3889 else
3890 return reconcat (list, list, ", ", elem, (char *) NULL);
3891 }
3892
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3910
/* Render TARGET_OPTIONS (a TARGET_W* bit mask) as a human-readable,
   comma-separated string allocated with xmalloc.  Unrecognized bits
   are reported as "unknown???".  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Each DO_TARG_OPTION consumes its bit from target_options, so any
     bits left afterwards are unknown.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3928
/* Log a register access to gdb_stdlog: "FUNC (name-or-number) = raw
   bytes [address decimal]".  Used by the fetch/store register
   delegators when targetdebug is on.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when there is one.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target byte order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* If it fits in a LONGEST, also show it as an integer.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3965
3966 void
3967 target_fetch_registers (struct regcache *regcache, int regno)
3968 {
3969 struct target_ops *t;
3970
3971 for (t = current_target.beneath; t != NULL; t = t->beneath)
3972 {
3973 if (t->to_fetch_registers != NULL)
3974 {
3975 t->to_fetch_registers (t, regcache, regno);
3976 if (targetdebug)
3977 debug_print_register ("target_fetch_registers", regcache, regno);
3978 return;
3979 }
3980 }
3981 }
3982
3983 void
3984 target_store_registers (struct regcache *regcache, int regno)
3985 {
3986 struct target_ops *t;
3987
3988 if (!may_write_registers)
3989 error (_("Writing to registers is not allowed (regno %d)"), regno);
3990
3991 for (t = current_target.beneath; t != NULL; t = t->beneath)
3992 {
3993 if (t->to_store_registers != NULL)
3994 {
3995 t->to_store_registers (t, regcache, regno);
3996 if (targetdebug)
3997 {
3998 debug_print_register ("target_store_registers", regcache, regno);
3999 }
4000 return;
4001 }
4002 }
4003
4004 noprocess ();
4005 }
4006
/* Return the CPU core thread PTID last ran on, or -1 if no target
   knows.  */
int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
	{
	  int retval = t->to_core_of_thread (t, ptid);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_core_of_thread (%d) = %d\n",
				ptid_get_pid (ptid), retval);
	  return retval;
	}
    }

  return -1;
}
4028
/* Compare SIZE bytes at MEMADDR in target memory against DATA.
   Returns the first implementing target's answer; raises an
   unsupported-operation error if no target implements it.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  /* tcomplain is declared ATTRIBUTE_NORETURN, so falling off the end
     here without a return is fine.  */
  tcomplain ();
}
4052
/* The documentation for this function is in its prototype declaration in
   target.h.  Returns 1 when no target implements masked
   watchpoints.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4079
/* The documentation for this function is in its prototype declaration in
   target.h.  Returns 1 when no target implements masked
   watchpoints.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4106
4107 /* The documentation for this function is in its prototype declaration
4108 in target.h. */
4109
4110 int
4111 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4112 {
4113 struct target_ops *t;
4114
4115 for (t = current_target.beneath; t != NULL; t = t->beneath)
4116 if (t->to_masked_watch_num_registers != NULL)
4117 return t->to_masked_watch_num_registers (t, addr, mask);
4118
4119 return -1;
4120 }
4121
4122 /* The documentation for this function is in its prototype declaration
4123 in target.h. */
4124
4125 int
4126 target_ranged_break_num_registers (void)
4127 {
4128 struct target_ops *t;
4129
4130 for (t = current_target.beneath; t != NULL; t = t->beneath)
4131 if (t->to_ranged_break_num_registers != NULL)
4132 return t->to_ranged_break_num_registers (t);
4133
4134 return -1;
4135 }
4136
4137 /* See target.h. */
4138
4139 int
4140 target_supports_btrace (void)
4141 {
4142 struct target_ops *t;
4143
4144 for (t = current_target.beneath; t != NULL; t = t->beneath)
4145 if (t->to_supports_btrace != NULL)
4146 return t->to_supports_btrace ();
4147
4148 return 0;
4149 }
4150
4151 /* See target.h. */
4152
4153 struct btrace_target_info *
4154 target_enable_btrace (ptid_t ptid)
4155 {
4156 struct target_ops *t;
4157
4158 for (t = current_target.beneath; t != NULL; t = t->beneath)
4159 if (t->to_enable_btrace != NULL)
4160 return t->to_enable_btrace (ptid);
4161
4162 tcomplain ();
4163 return NULL;
4164 }
4165
4166 /* See target.h. */
4167
4168 void
4169 target_disable_btrace (struct btrace_target_info *btinfo)
4170 {
4171 struct target_ops *t;
4172
4173 for (t = current_target.beneath; t != NULL; t = t->beneath)
4174 if (t->to_disable_btrace != NULL)
4175 {
4176 t->to_disable_btrace (btinfo);
4177 return;
4178 }
4179
4180 tcomplain ();
4181 }
4182
4183 /* See target.h. */
4184
4185 void
4186 target_teardown_btrace (struct btrace_target_info *btinfo)
4187 {
4188 struct target_ops *t;
4189
4190 for (t = current_target.beneath; t != NULL; t = t->beneath)
4191 if (t->to_teardown_btrace != NULL)
4192 {
4193 t->to_teardown_btrace (btinfo);
4194 return;
4195 }
4196
4197 tcomplain ();
4198 }
4199
4200 /* See target.h. */
4201
4202 VEC (btrace_block_s) *
4203 target_read_btrace (struct btrace_target_info *btinfo,
4204 enum btrace_read_type type)
4205 {
4206 struct target_ops *t;
4207
4208 for (t = current_target.beneath; t != NULL; t = t->beneath)
4209 if (t->to_read_btrace != NULL)
4210 return t->to_read_btrace (btinfo, type);
4211
4212 tcomplain ();
4213 return NULL;
4214 }
4215
4216 /* See target.h. */
4217
4218 void
4219 target_stop_recording (void)
4220 {
4221 struct target_ops *t;
4222
4223 for (t = current_target.beneath; t != NULL; t = t->beneath)
4224 if (t->to_stop_recording != NULL)
4225 {
4226 t->to_stop_recording ();
4227 return;
4228 }
4229
4230 /* This is optional. */
4231 }
4232
4233 /* See target.h. */
4234
4235 void
4236 target_info_record (void)
4237 {
4238 struct target_ops *t;
4239
4240 for (t = current_target.beneath; t != NULL; t = t->beneath)
4241 if (t->to_info_record != NULL)
4242 {
4243 t->to_info_record ();
4244 return;
4245 }
4246
4247 tcomplain ();
4248 }
4249
4250 /* See target.h. */
4251
4252 void
4253 target_save_record (const char *filename)
4254 {
4255 struct target_ops *t;
4256
4257 for (t = current_target.beneath; t != NULL; t = t->beneath)
4258 if (t->to_save_record != NULL)
4259 {
4260 t->to_save_record (filename);
4261 return;
4262 }
4263
4264 tcomplain ();
4265 }
4266
4267 /* See target.h. */
4268
4269 int
4270 target_supports_delete_record (void)
4271 {
4272 struct target_ops *t;
4273
4274 for (t = current_target.beneath; t != NULL; t = t->beneath)
4275 if (t->to_delete_record != NULL)
4276 return 1;
4277
4278 return 0;
4279 }
4280
4281 /* See target.h. */
4282
4283 void
4284 target_delete_record (void)
4285 {
4286 struct target_ops *t;
4287
4288 for (t = current_target.beneath; t != NULL; t = t->beneath)
4289 if (t->to_delete_record != NULL)
4290 {
4291 t->to_delete_record ();
4292 return;
4293 }
4294
4295 tcomplain ();
4296 }
4297
4298 /* See target.h. */
4299
4300 int
4301 target_record_is_replaying (void)
4302 {
4303 struct target_ops *t;
4304
4305 for (t = current_target.beneath; t != NULL; t = t->beneath)
4306 if (t->to_record_is_replaying != NULL)
4307 return t->to_record_is_replaying ();
4308
4309 return 0;
4310 }
4311
4312 /* See target.h. */
4313
4314 void
4315 target_goto_record_begin (void)
4316 {
4317 struct target_ops *t;
4318
4319 for (t = current_target.beneath; t != NULL; t = t->beneath)
4320 if (t->to_goto_record_begin != NULL)
4321 {
4322 t->to_goto_record_begin ();
4323 return;
4324 }
4325
4326 tcomplain ();
4327 }
4328
4329 /* See target.h. */
4330
4331 void
4332 target_goto_record_end (void)
4333 {
4334 struct target_ops *t;
4335
4336 for (t = current_target.beneath; t != NULL; t = t->beneath)
4337 if (t->to_goto_record_end != NULL)
4338 {
4339 t->to_goto_record_end ();
4340 return;
4341 }
4342
4343 tcomplain ();
4344 }
4345
4346 /* See target.h. */
4347
4348 void
4349 target_goto_record (ULONGEST insn)
4350 {
4351 struct target_ops *t;
4352
4353 for (t = current_target.beneath; t != NULL; t = t->beneath)
4354 if (t->to_goto_record != NULL)
4355 {
4356 t->to_goto_record (insn);
4357 return;
4358 }
4359
4360 tcomplain ();
4361 }
4362
4363 /* See target.h. */
4364
4365 void
4366 target_insn_history (int size, int flags)
4367 {
4368 struct target_ops *t;
4369
4370 for (t = current_target.beneath; t != NULL; t = t->beneath)
4371 if (t->to_insn_history != NULL)
4372 {
4373 t->to_insn_history (size, flags);
4374 return;
4375 }
4376
4377 tcomplain ();
4378 }
4379
4380 /* See target.h. */
4381
4382 void
4383 target_insn_history_from (ULONGEST from, int size, int flags)
4384 {
4385 struct target_ops *t;
4386
4387 for (t = current_target.beneath; t != NULL; t = t->beneath)
4388 if (t->to_insn_history_from != NULL)
4389 {
4390 t->to_insn_history_from (from, size, flags);
4391 return;
4392 }
4393
4394 tcomplain ();
4395 }
4396
4397 /* See target.h. */
4398
4399 void
4400 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4401 {
4402 struct target_ops *t;
4403
4404 for (t = current_target.beneath; t != NULL; t = t->beneath)
4405 if (t->to_insn_history_range != NULL)
4406 {
4407 t->to_insn_history_range (begin, end, flags);
4408 return;
4409 }
4410
4411 tcomplain ();
4412 }
4413
4414 /* See target.h. */
4415
4416 void
4417 target_call_history (int size, int flags)
4418 {
4419 struct target_ops *t;
4420
4421 for (t = current_target.beneath; t != NULL; t = t->beneath)
4422 if (t->to_call_history != NULL)
4423 {
4424 t->to_call_history (size, flags);
4425 return;
4426 }
4427
4428 tcomplain ();
4429 }
4430
4431 /* See target.h. */
4432
4433 void
4434 target_call_history_from (ULONGEST begin, int size, int flags)
4435 {
4436 struct target_ops *t;
4437
4438 for (t = current_target.beneath; t != NULL; t = t->beneath)
4439 if (t->to_call_history_from != NULL)
4440 {
4441 t->to_call_history_from (begin, size, flags);
4442 return;
4443 }
4444
4445 tcomplain ();
4446 }
4447
4448 /* See target.h. */
4449
4450 void
4451 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4452 {
4453 struct target_ops *t;
4454
4455 for (t = current_target.beneath; t != NULL; t = t->beneath)
4456 if (t->to_call_history_range != NULL)
4457 {
4458 t->to_call_history_range (begin, end, flags);
4459 return;
4460 }
4461
4462 tcomplain ();
4463 }
4464
/* Debug-target wrapper: forward to_prepare_to_store to the real
   target, then log the call.  */
static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4472
/* Debug-target wrapper: forward deprecated_xfer_memory to the real
   target, then log the call including a hex dump of the transferred
   bytes (abbreviated unless targetdebug >= 2).  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into 16-byte lines, keyed off the host
	     address of the buffer slot.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity 1, print only the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4513
/* Debug-target wrapper: forward to_files_info to the real target,
   then log the call.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4521
/* Debug-target wrapper: forward to_insert_breakpoint to the real
   target, logging the placed address and result.  */
static int
debug_to_insert_breakpoint (struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4536
/* Debug-target wrapper: forward to_remove_breakpoint to the real
   target, logging the placed address and result.  */
static int
debug_to_remove_breakpoint (struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4551
4552 static int
4553 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4554 {
4555 int retval;
4556
4557 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4558
4559 fprintf_unfiltered (gdb_stdlog,
4560 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4561 (unsigned long) type,
4562 (unsigned long) cnt,
4563 (unsigned long) from_tty,
4564 (unsigned long) retval);
4565 return retval;
4566 }
4567
4568 static int
4569 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4570 {
4571 CORE_ADDR retval;
4572
4573 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4574
4575 fprintf_unfiltered (gdb_stdlog,
4576 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4577 core_addr_to_string (addr), (unsigned long) len,
4578 core_addr_to_string (retval));
4579 return retval;
4580 }
4581
4582 static int
4583 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4584 struct expression *cond)
4585 {
4586 int retval;
4587
4588 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4589 rw, cond);
4590
4591 fprintf_unfiltered (gdb_stdlog,
4592 "target_can_accel_watchpoint_condition "
4593 "(%s, %d, %d, %s) = %ld\n",
4594 core_addr_to_string (addr), len, rw,
4595 host_address_to_string (cond), (unsigned long) retval);
4596 return retval;
4597 }
4598
4599 static int
4600 debug_to_stopped_by_watchpoint (void)
4601 {
4602 int retval;
4603
4604 retval = debug_target.to_stopped_by_watchpoint ();
4605
4606 fprintf_unfiltered (gdb_stdlog,
4607 "target_stopped_by_watchpoint () = %ld\n",
4608 (unsigned long) retval);
4609 return retval;
4610 }
4611
4612 static int
4613 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4614 {
4615 int retval;
4616
4617 retval = debug_target.to_stopped_data_address (target, addr);
4618
4619 fprintf_unfiltered (gdb_stdlog,
4620 "target_stopped_data_address ([%s]) = %ld\n",
4621 core_addr_to_string (*addr),
4622 (unsigned long)retval);
4623 return retval;
4624 }
4625
4626 static int
4627 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4628 CORE_ADDR addr,
4629 CORE_ADDR start, int length)
4630 {
4631 int retval;
4632
4633 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4634 start, length);
4635
4636 fprintf_filtered (gdb_stdlog,
4637 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4638 core_addr_to_string (addr), core_addr_to_string (start),
4639 length, retval);
4640 return retval;
4641 }
4642
4643 static int
4644 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4645 struct bp_target_info *bp_tgt)
4646 {
4647 int retval;
4648
4649 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4650
4651 fprintf_unfiltered (gdb_stdlog,
4652 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4653 core_addr_to_string (bp_tgt->placed_address),
4654 (unsigned long) retval);
4655 return retval;
4656 }
4657
4658 static int
4659 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4660 struct bp_target_info *bp_tgt)
4661 {
4662 int retval;
4663
4664 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4665
4666 fprintf_unfiltered (gdb_stdlog,
4667 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4668 core_addr_to_string (bp_tgt->placed_address),
4669 (unsigned long) retval);
4670 return retval;
4671 }
4672
4673 static int
4674 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4675 struct expression *cond)
4676 {
4677 int retval;
4678
4679 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4680
4681 fprintf_unfiltered (gdb_stdlog,
4682 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4683 core_addr_to_string (addr), len, type,
4684 host_address_to_string (cond), (unsigned long) retval);
4685 return retval;
4686 }
4687
4688 static int
4689 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4690 struct expression *cond)
4691 {
4692 int retval;
4693
4694 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4695
4696 fprintf_unfiltered (gdb_stdlog,
4697 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4698 core_addr_to_string (addr), len, type,
4699 host_address_to_string (cond), (unsigned long) retval);
4700 return retval;
4701 }
4702
4703 static void
4704 debug_to_terminal_init (void)
4705 {
4706 debug_target.to_terminal_init ();
4707
4708 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4709 }
4710
4711 static void
4712 debug_to_terminal_inferior (void)
4713 {
4714 debug_target.to_terminal_inferior ();
4715
4716 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4717 }
4718
4719 static void
4720 debug_to_terminal_ours_for_output (void)
4721 {
4722 debug_target.to_terminal_ours_for_output ();
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4725 }
4726
4727 static void
4728 debug_to_terminal_ours (void)
4729 {
4730 debug_target.to_terminal_ours ();
4731
4732 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4733 }
4734
4735 static void
4736 debug_to_terminal_save_ours (void)
4737 {
4738 debug_target.to_terminal_save_ours ();
4739
4740 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4741 }
4742
/* "targetdebug" wrapper around to_terminal_info.  */

static void
debug_to_terminal_info (const char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  /* NOTE(review): ARG is printed with %s and may be NULL here (callers
     pass the raw command argument) -- this relies on the host printf
     tolerating a NULL string; confirm against the call sites.  */
  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4751
/* "targetdebug" wrapper around to_load.  */

static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  /* NOTE(review): ARGS may be NULL for a bare "load" -- printing it
     with %s relies on the host printf; confirm.  */
  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4759
4760 static void
4761 debug_to_post_startup_inferior (ptid_t ptid)
4762 {
4763 debug_target.to_post_startup_inferior (ptid);
4764
4765 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4766 ptid_get_pid (ptid));
4767 }
4768
4769 static int
4770 debug_to_insert_fork_catchpoint (int pid)
4771 {
4772 int retval;
4773
4774 retval = debug_target.to_insert_fork_catchpoint (pid);
4775
4776 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4777 pid, retval);
4778
4779 return retval;
4780 }
4781
4782 static int
4783 debug_to_remove_fork_catchpoint (int pid)
4784 {
4785 int retval;
4786
4787 retval = debug_target.to_remove_fork_catchpoint (pid);
4788
4789 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4790 pid, retval);
4791
4792 return retval;
4793 }
4794
4795 static int
4796 debug_to_insert_vfork_catchpoint (int pid)
4797 {
4798 int retval;
4799
4800 retval = debug_target.to_insert_vfork_catchpoint (pid);
4801
4802 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4803 pid, retval);
4804
4805 return retval;
4806 }
4807
4808 static int
4809 debug_to_remove_vfork_catchpoint (int pid)
4810 {
4811 int retval;
4812
4813 retval = debug_target.to_remove_vfork_catchpoint (pid);
4814
4815 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4816 pid, retval);
4817
4818 return retval;
4819 }
4820
4821 static int
4822 debug_to_insert_exec_catchpoint (int pid)
4823 {
4824 int retval;
4825
4826 retval = debug_target.to_insert_exec_catchpoint (pid);
4827
4828 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4829 pid, retval);
4830
4831 return retval;
4832 }
4833
4834 static int
4835 debug_to_remove_exec_catchpoint (int pid)
4836 {
4837 int retval;
4838
4839 retval = debug_target.to_remove_exec_catchpoint (pid);
4840
4841 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4842 pid, retval);
4843
4844 return retval;
4845 }
4846
/* "targetdebug" wrapper around to_has_exited.  */

static int
debug_to_has_exited (int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);

  /* NOTE(review): *EXIT_STATUS is printed unconditionally; this assumes
     the method sets it even when reporting "not exited" -- verify
     against the to_has_exited implementations.  */
  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4859
4860 static int
4861 debug_to_can_run (void)
4862 {
4863 int retval;
4864
4865 retval = debug_target.to_can_run ();
4866
4867 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4868
4869 return retval;
4870 }
4871
/* "targetdebug" wrapper around to_thread_architecture.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  /* NOTE(review): RETVAL is dereferenced for the trace message, so this
     assumes the method never returns NULL -- confirm.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4886
4887 static void
4888 debug_to_stop (ptid_t ptid)
4889 {
4890 debug_target.to_stop (ptid);
4891
4892 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4893 target_pid_to_str (ptid));
4894 }
4895
/* "targetdebug" wrapper around to_rcmd.  The monitor's reply goes to
   OUTBUF; only the command itself is traced.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4903
/* "targetdebug" wrapper around to_pid_to_exec_file.  */

static char *
debug_to_pid_to_exec_file (int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (pid);

  /* NOTE(review): EXEC_FILE may be NULL when the target cannot resolve
     the executable; printing it with %s relies on the host printf --
     confirm.  */
  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
4916
/* Install the debug_to_* logging wrappers.  A copy of the current
   (merged) target vector is saved in debug_target; the traced methods
   of current_target are then redirected at the wrappers above, which
   forward each call to the saved copy and log it to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Snapshot the real vector first, so the wrappers have something to
     forward to.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4964 \f
4965
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4970
/* Implement the "monitor" command: forward CMD verbatim to the remote
   monitor via target_rcmd, with the reply going to gdb_stdtarg.

   The target lacks rcmd support when its to_rcmd slot still points at
   the tcomplain "unsupported" stub -- either directly, or (when target
   debugging is active) behind the debug_to_rcmd wrapper; in that case
   raise an error instead of calling through.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
4983
4984 /* Print the name of each layers of our target stack. */
4985
4986 static void
4987 maintenance_print_target_stack (char *cmd, int from_tty)
4988 {
4989 struct target_ops *t;
4990
4991 printf_filtered (_("The current target stack is:\n"));
4992
4993 for (t = target_stack; t != NULL; t = t->beneath)
4994 {
4995 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4996 }
4997 }
4998
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated until the
   inferior stops (see set_target_async_command below).  */
static int target_async_permitted_1 = 0;
5005
5006 static void
5007 set_target_async_command (char *args, int from_tty,
5008 struct cmd_list_element *c)
5009 {
5010 if (have_live_inferiors ())
5011 {
5012 target_async_permitted_1 = target_async_permitted;
5013 error (_("Cannot change this setting while the inferior is running."));
5014 }
5015
5016 target_async_permitted = target_async_permitted_1;
5017 }
5018
/* "show target-async" callback: report whether asynchronous control
   of the inferior is permitted.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5028
/* Temporary copies of permission settings.  The "set" commands write
   to these; the real may_* flags are updated from them only when the
   change is legal (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5037
/* Make the user-set values match the real values again, e.g. after a
   "set" command was rejected and its edit must be rolled back.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5050
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is deliberately excluded: it is committed by
   set_write_memory_permission, independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll back the user-visible copies before erroring out, so the
	 "show" values stay in sync with the real settings.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5072
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this may be changed while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5083
5084
/* Set up the initial (dummy) target and register all target-related
   commands and set/show parameters.  Called once at startup.  */

void
initialize_targets (void)
{
  /* Guarantee the target stack is never empty.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission settings below write to the *_1 staging
     variables; their "set" callbacks commit the edits to the real
     flags when legal.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.185073 seconds and 4 git commands to generate.