1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdb_wait.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_terminal_info (char *, int);
146
147 static void debug_to_load (char *, int);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_stop (ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_index;
159 unsigned target_struct_allocsize;
160 #define DEFAULT_ALLOCSIZE 10
161
162 /* The initial current target, so that there is always a semi-valid
163 current target. */
164
165 static struct target_ops dummy_target;
166
167 /* Top of target stack. */
168
169 static struct target_ops *target_stack;
170
171 /* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174 struct target_ops current_target;
175
176 /* Command list for target. */
177
178 static struct cmd_list_element *targetlist = NULL;
179
180 /* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183 static int trust_readonly = 0;
184
185 /* Nonzero if we should show true memory content including
186 memory breakpoints inserted by GDB. */
187
188 static int show_memory_breakpoints = 0;
189
190 /* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192 inadvertent disruption, such as in non-stop mode. */
193
194 int may_write_registers = 1;
195
196 int may_write_memory = 1;
197
198 int may_insert_breakpoints = 1;
199
200 int may_insert_tracepoints = 1;
201
202 int may_insert_fast_tracepoints = 1;
203
204 int may_stop = 1;
205
206 /* Non-zero if we want to see tracing of target-level operations. */
207
208 static int targetdebug = 0;
209 static void
210 show_targetdebug (struct ui_file *file, int from_tty,
211 struct cmd_list_element *c, const char *value)
212 {
213 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
214 }
215
216 static void setup_target_debug (void);
217
218 /* The option sets this. */
219 static int stack_cache_enabled_p_1 = 1;
220 /* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223 static int stack_cache_enabled_p = 1;
224
225 /* This is called *after* the stack-cache has been set.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230 static void
231 set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233 {
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238 }
239
240 static void
241 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
242 struct cmd_list_element *c, const char *value)
243 {
244 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
245 }
246
247 /* Cache of memory operations, to speed up remote access. */
248 static DCACHE *target_dcache;
249
250 /* Invalidate the target dcache. */
251
252 void
253 target_dcache_invalidate (void)
254 {
255 dcache_invalidate (target_dcache);
256 }
257
258 /* The user just typed 'target' without the name of a target. */
259
260 static void
261 target_command (char *arg, int from_tty)
262 {
263 fputs_filtered ("Argument required (target name). Try `help target'\n",
264 gdb_stdout);
265 }
266
267 /* Default target_has_* methods for process_stratum targets. */
268
269 int
270 default_child_has_all_memory (struct target_ops *ops)
271 {
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_memory (struct target_ops *ops)
281 {
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289 int
290 default_child_has_stack (struct target_ops *ops)
291 {
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297 }
298
299 int
300 default_child_has_registers (struct target_ops *ops)
301 {
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307 }
308
309 int
310 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311 {
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318 }
319
320
321 int
322 target_has_all_memory_1 (void)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331 }
332
333 int
334 target_has_memory_1 (void)
335 {
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343 }
344
345 int
346 target_has_stack_1 (void)
347 {
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355 }
356
357 int
358 target_has_registers_1 (void)
359 {
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367 }
368
369 int
370 target_has_execution_1 (ptid_t the_ptid)
371 {
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379 }
380
381 int
382 target_has_execution_current (void)
383 {
384 return target_has_execution_1 (inferior_ptid);
385 }
386
387 /* Add a possible target architecture to the list. */
388
389 void
390 add_target (struct target_ops *t)
391 {
392 /* Provide default values for all "must have" methods. */
393 if (t->to_xfer_partial == NULL)
394 t->to_xfer_partial = default_xfer_partial;
395
396 if (t->to_has_all_memory == NULL)
397 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
398
399 if (t->to_has_memory == NULL)
400 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_stack == NULL)
403 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_registers == NULL)
406 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_execution == NULL)
409 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
410
411 if (!target_structs)
412 {
413 target_struct_allocsize = DEFAULT_ALLOCSIZE;
414 target_structs = (struct target_ops **) xmalloc
415 (target_struct_allocsize * sizeof (*target_structs));
416 }
417 if (target_struct_size >= target_struct_allocsize)
418 {
419 target_struct_allocsize *= 2;
420 target_structs = (struct target_ops **)
421 xrealloc ((char *) target_structs,
422 target_struct_allocsize * sizeof (*target_structs));
423 }
424 target_structs[target_struct_size++] = t;
425
426 if (targetlist == NULL)
427 add_prefix_cmd ("target", class_run, target_command, _("\
428 Connect to a target machine or process.\n\
429 The first argument is the type or protocol of the target machine.\n\
430 Remaining arguments are interpreted by the target protocol. For more\n\
431 information on the arguments for a particular protocol, type\n\
432 `help target ' followed by the protocol name."),
433 &targetlist, "target ", 0, &cmdlist);
434 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
435 }
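/* Editorial sketch, not part of GDB's target.c: add_target above grows
   TARGET_STRUCTS geometrically, starting at DEFAULT_ALLOCSIZE and
   doubling when full.  The stand-alone version below shows the same
   append-with-doubling idiom using plain malloc/realloc instead of
   xmalloc/xrealloc; all names are hypothetical.  Kept under #if 0 so it
   does not affect the build.  */
#if 0
#include <stdlib.h>

struct vec
{
  void **items;          /* Storage, NULL until first append.  */
  unsigned size;         /* Number of used slots.  */
  unsigned allocsize;    /* Number of allocated slots.  */
};

/* Append ITEM to V (assumed zero-initialized before first use),
   doubling the allocation when it is full.  Returns 0 on success,
   -1 on allocation failure.  */

static int
vec_append (struct vec *v, void *item)
{
  if (v->items == NULL)
    {
      v->allocsize = 10;        /* Mirrors DEFAULT_ALLOCSIZE above.  */
      v->items = malloc (v->allocsize * sizeof (*v->items));
      if (v->items == NULL)
        return -1;
    }
  if (v->size >= v->allocsize)
    {
      void **p;

      v->allocsize *= 2;
      p = realloc (v->items, v->allocsize * sizeof (*v->items));
      if (p == NULL)
        return -1;
      v->items = p;
    }
  v->items[v->size++] = item;
  return 0;
}
#endif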
436
437 /* Stub functions */
438
439 void
440 target_ignore (void)
441 {
442 }
443
444 void
445 target_kill (void)
446 {
447 struct target_ops *t;
448
449 for (t = current_target.beneath; t != NULL; t = t->beneath)
450 if (t->to_kill != NULL)
451 {
452 if (targetdebug)
453 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
454
455 t->to_kill (t);
456 return;
457 }
458
459 noprocess ();
460 }
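/* Editorial sketch, not part of GDB's target.c: target_kill above shows
   the delegation pattern used throughout this file -- walk the stack
   from current_target.beneath and call the first layer that implements
   the method, complaining if none does.  A stand-alone model of that
   pattern with hypothetical names, kept under #if 0.  */
#if 0
#include <stdio.h>

struct ops
{
  const char *name;
  struct ops *beneath;                 /* Next layer down, or NULL.  */
  void (*do_kill) (struct ops *self);  /* Optional method.  */
};

/* Dispatch DO_KILL to the first layer that implements it, exactly as
   the loops in this file do for to_kill, to_flash_erase, etc.  */

static void
dispatch_kill (struct ops *top)
{
  struct ops *t;

  for (t = top; t != NULL; t = t->beneath)
    if (t->do_kill != NULL)
      {
        t->do_kill (t);
        return;
      }

  fprintf (stderr, "no layer implements kill\n");
}
#endif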
461
462 void
463 target_load (char *arg, int from_tty)
464 {
465 target_dcache_invalidate ();
466 (*current_target.to_load) (arg, from_tty);
467 }
468
469 void
470 target_create_inferior (char *exec_file, char *args,
471 char **env, int from_tty)
472 {
473 struct target_ops *t;
474
475 for (t = current_target.beneath; t != NULL; t = t->beneath)
476 {
477 if (t->to_create_inferior != NULL)
478 {
479 t->to_create_inferior (t, exec_file, args, env, from_tty);
480 if (targetdebug)
481 fprintf_unfiltered (gdb_stdlog,
482 "target_create_inferior (%s, %s, xxx, %d)\n",
483 exec_file, args, from_tty);
484 return;
485 }
486 }
487
488 internal_error (__FILE__, __LINE__,
489 _("could not find a target to create inferior"));
490 }
491
492 void
493 target_terminal_inferior (void)
494 {
495 /* A background resume (``run&'') should leave GDB in control of the
496 terminal. Use target_can_async_p, not target_is_async_p, since at
497 this point the target is not async yet. However, if sync_execution
498 is not set, we know it will become async prior to resume. */
499 if (target_can_async_p () && !sync_execution)
500 return;
501
502 /* If GDB is resuming the inferior in the foreground, install
503 inferior's terminal modes. */
504 (*current_target.to_terminal_inferior) ();
505 }
506
507 static int
508 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
509 struct target_ops *t)
510 {
511 errno = EIO; /* Can't read/write this location. */
512 return 0; /* No bytes handled. */
513 }
514
515 static void
516 tcomplain (void)
517 {
518 error (_("You can't do that when your target is `%s'"),
519 current_target.to_shortname);
520 }
521
522 void
523 noprocess (void)
524 {
525 error (_("You can't do that without a process to debug."));
526 }
527
528 static void
529 default_terminal_info (char *args, int from_tty)
530 {
531 printf_unfiltered (_("No saved terminal information.\n"));
532 }
533
534 /* A default implementation for the to_get_ada_task_ptid target method.
535
536 This function builds the PTID by using both LWP and TID as part of
537 the PTID lwp and tid elements. The pid used is the pid of the
538 inferior_ptid. */
539
540 static ptid_t
541 default_get_ada_task_ptid (long lwp, long tid)
542 {
543 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
544 }
545
546 static enum exec_direction_kind
547 default_execution_direction (void)
548 {
549 if (!target_can_execute_reverse)
550 return EXEC_FORWARD;
551 else if (!target_can_async_p ())
552 return EXEC_FORWARD;
553 else
554 gdb_assert_not_reached ("\
555 to_execution_direction must be implemented for reverse async");
556 }
557
558 /* Go through the target stack from top to bottom, copying over zero
559 entries in current_target, then filling in still empty entries. In
560 effect, we are doing class inheritance through the pushed target
561 vectors.
562
563 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
564 is currently implemented, is that it discards any knowledge of
565 which target an inherited method originally belonged to.
566 Consequently, new target methods should instead explicitly and
567 locally search the target stack for the target that can handle the
568 request. */
569
570 static void
571 update_current_target (void)
572 {
573 struct target_ops *t;
574
575 /* First, reset current's contents. */
576 memset (&current_target, 0, sizeof (current_target));
577
578 #define INHERIT(FIELD, TARGET) \
579 if (!current_target.FIELD) \
580 current_target.FIELD = (TARGET)->FIELD
581
582 for (t = target_stack; t; t = t->beneath)
583 {
584 INHERIT (to_shortname, t);
585 INHERIT (to_longname, t);
586 INHERIT (to_doc, t);
587 /* Do not inherit to_open. */
588 /* Do not inherit to_close. */
589 /* Do not inherit to_attach. */
590 INHERIT (to_post_attach, t);
591 INHERIT (to_attach_no_wait, t);
592 /* Do not inherit to_detach. */
593 /* Do not inherit to_disconnect. */
594 /* Do not inherit to_resume. */
595 /* Do not inherit to_wait. */
596 /* Do not inherit to_fetch_registers. */
597 /* Do not inherit to_store_registers. */
598 INHERIT (to_prepare_to_store, t);
599 INHERIT (deprecated_xfer_memory, t);
600 INHERIT (to_files_info, t);
601 INHERIT (to_insert_breakpoint, t);
602 INHERIT (to_remove_breakpoint, t);
603 INHERIT (to_can_use_hw_breakpoint, t);
604 INHERIT (to_insert_hw_breakpoint, t);
605 INHERIT (to_remove_hw_breakpoint, t);
606 /* Do not inherit to_ranged_break_num_registers. */
607 INHERIT (to_insert_watchpoint, t);
608 INHERIT (to_remove_watchpoint, t);
609 /* Do not inherit to_insert_mask_watchpoint. */
610 /* Do not inherit to_remove_mask_watchpoint. */
611 INHERIT (to_stopped_data_address, t);
612 INHERIT (to_have_steppable_watchpoint, t);
613 INHERIT (to_have_continuable_watchpoint, t);
614 INHERIT (to_stopped_by_watchpoint, t);
615 INHERIT (to_watchpoint_addr_within_range, t);
616 INHERIT (to_region_ok_for_hw_watchpoint, t);
617 INHERIT (to_can_accel_watchpoint_condition, t);
618 /* Do not inherit to_masked_watch_num_registers. */
619 INHERIT (to_terminal_init, t);
620 INHERIT (to_terminal_inferior, t);
621 INHERIT (to_terminal_ours_for_output, t);
622 INHERIT (to_terminal_ours, t);
623 INHERIT (to_terminal_save_ours, t);
624 INHERIT (to_terminal_info, t);
625 /* Do not inherit to_kill. */
626 INHERIT (to_load, t);
627 /* Do not inherit to_create_inferior. */
628 INHERIT (to_post_startup_inferior, t);
629 INHERIT (to_insert_fork_catchpoint, t);
630 INHERIT (to_remove_fork_catchpoint, t);
631 INHERIT (to_insert_vfork_catchpoint, t);
632 INHERIT (to_remove_vfork_catchpoint, t);
633 /* Do not inherit to_follow_fork. */
634 INHERIT (to_insert_exec_catchpoint, t);
635 INHERIT (to_remove_exec_catchpoint, t);
636 INHERIT (to_set_syscall_catchpoint, t);
637 INHERIT (to_has_exited, t);
638 /* Do not inherit to_mourn_inferior. */
639 INHERIT (to_can_run, t);
640 /* Do not inherit to_pass_signals. */
641 /* Do not inherit to_thread_alive. */
642 /* Do not inherit to_find_new_threads. */
643 /* Do not inherit to_pid_to_str. */
644 INHERIT (to_extra_thread_info, t);
645 INHERIT (to_thread_name, t);
646 INHERIT (to_stop, t);
647 /* Do not inherit to_xfer_partial. */
648 INHERIT (to_rcmd, t);
649 INHERIT (to_pid_to_exec_file, t);
650 INHERIT (to_log_command, t);
651 INHERIT (to_stratum, t);
652 /* Do not inherit to_has_all_memory. */
653 /* Do not inherit to_has_memory. */
654 /* Do not inherit to_has_stack. */
655 /* Do not inherit to_has_registers. */
656 /* Do not inherit to_has_execution. */
657 INHERIT (to_has_thread_control, t);
658 INHERIT (to_can_async_p, t);
659 INHERIT (to_is_async_p, t);
660 INHERIT (to_async, t);
661 INHERIT (to_find_memory_regions, t);
662 INHERIT (to_make_corefile_notes, t);
663 INHERIT (to_get_bookmark, t);
664 INHERIT (to_goto_bookmark, t);
665 /* Do not inherit to_get_thread_local_address. */
666 INHERIT (to_can_execute_reverse, t);
667 INHERIT (to_execution_direction, t);
668 INHERIT (to_thread_architecture, t);
669 /* Do not inherit to_read_description. */
670 INHERIT (to_get_ada_task_ptid, t);
671 /* Do not inherit to_search_memory. */
672 INHERIT (to_supports_multi_process, t);
673 INHERIT (to_supports_enable_disable_tracepoint, t);
674 INHERIT (to_supports_string_tracing, t);
675 INHERIT (to_trace_init, t);
676 INHERIT (to_download_tracepoint, t);
677 INHERIT (to_can_download_tracepoint, t);
678 INHERIT (to_download_trace_state_variable, t);
679 INHERIT (to_enable_tracepoint, t);
680 INHERIT (to_disable_tracepoint, t);
681 INHERIT (to_trace_set_readonly_regions, t);
682 INHERIT (to_trace_start, t);
683 INHERIT (to_get_trace_status, t);
684 INHERIT (to_get_tracepoint_status, t);
685 INHERIT (to_trace_stop, t);
686 INHERIT (to_trace_find, t);
687 INHERIT (to_get_trace_state_variable_value, t);
688 INHERIT (to_save_trace_data, t);
689 INHERIT (to_upload_tracepoints, t);
690 INHERIT (to_upload_trace_state_variables, t);
691 INHERIT (to_get_raw_trace_data, t);
692 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
693 INHERIT (to_set_disconnected_tracing, t);
694 INHERIT (to_set_circular_trace_buffer, t);
695 INHERIT (to_set_trace_notes, t);
696 INHERIT (to_get_tib_address, t);
697 INHERIT (to_set_permissions, t);
698 INHERIT (to_static_tracepoint_marker_at, t);
699 INHERIT (to_static_tracepoint_markers_by_strid, t);
700 INHERIT (to_traceframe_info, t);
701 INHERIT (to_magic, t);
702 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
703 /* Do not inherit to_memory_map. */
704 /* Do not inherit to_flash_erase. */
705 /* Do not inherit to_flash_done. */
706 }
707 #undef INHERIT
708
709 /* Clean up a target struct so it no longer has any zero pointers in
710 it. Some entries are defaulted to a method that prints an error,
711 others are hard-wired to a standard recursive default. */
712
713 #define de_fault(field, value) \
714 if (!current_target.field) \
715 current_target.field = value
716
717 de_fault (to_open,
718 (void (*) (char *, int))
719 tcomplain);
720 de_fault (to_close,
721 (void (*) (int))
722 target_ignore);
723 de_fault (to_post_attach,
724 (void (*) (int))
725 target_ignore);
726 de_fault (to_prepare_to_store,
727 (void (*) (struct regcache *))
728 noprocess);
729 de_fault (deprecated_xfer_memory,
730 (int (*) (CORE_ADDR, gdb_byte *, int, int,
731 struct mem_attrib *, struct target_ops *))
732 nomemory);
733 de_fault (to_files_info,
734 (void (*) (struct target_ops *))
735 target_ignore);
736 de_fault (to_insert_breakpoint,
737 memory_insert_breakpoint);
738 de_fault (to_remove_breakpoint,
739 memory_remove_breakpoint);
740 de_fault (to_can_use_hw_breakpoint,
741 (int (*) (int, int, int))
742 return_zero);
743 de_fault (to_insert_hw_breakpoint,
744 (int (*) (struct gdbarch *, struct bp_target_info *))
745 return_minus_one);
746 de_fault (to_remove_hw_breakpoint,
747 (int (*) (struct gdbarch *, struct bp_target_info *))
748 return_minus_one);
749 de_fault (to_insert_watchpoint,
750 (int (*) (CORE_ADDR, int, int, struct expression *))
751 return_minus_one);
752 de_fault (to_remove_watchpoint,
753 (int (*) (CORE_ADDR, int, int, struct expression *))
754 return_minus_one);
755 de_fault (to_stopped_by_watchpoint,
756 (int (*) (void))
757 return_zero);
758 de_fault (to_stopped_data_address,
759 (int (*) (struct target_ops *, CORE_ADDR *))
760 return_zero);
761 de_fault (to_watchpoint_addr_within_range,
762 default_watchpoint_addr_within_range);
763 de_fault (to_region_ok_for_hw_watchpoint,
764 default_region_ok_for_hw_watchpoint);
765 de_fault (to_can_accel_watchpoint_condition,
766 (int (*) (CORE_ADDR, int, int, struct expression *))
767 return_zero);
768 de_fault (to_terminal_init,
769 (void (*) (void))
770 target_ignore);
771 de_fault (to_terminal_inferior,
772 (void (*) (void))
773 target_ignore);
774 de_fault (to_terminal_ours_for_output,
775 (void (*) (void))
776 target_ignore);
777 de_fault (to_terminal_ours,
778 (void (*) (void))
779 target_ignore);
780 de_fault (to_terminal_save_ours,
781 (void (*) (void))
782 target_ignore);
783 de_fault (to_terminal_info,
784 default_terminal_info);
785 de_fault (to_load,
786 (void (*) (char *, int))
787 tcomplain);
788 de_fault (to_post_startup_inferior,
789 (void (*) (ptid_t))
790 target_ignore);
791 de_fault (to_insert_fork_catchpoint,
792 (int (*) (int))
793 return_one);
794 de_fault (to_remove_fork_catchpoint,
795 (int (*) (int))
796 return_one);
797 de_fault (to_insert_vfork_catchpoint,
798 (int (*) (int))
799 return_one);
800 de_fault (to_remove_vfork_catchpoint,
801 (int (*) (int))
802 return_one);
803 de_fault (to_insert_exec_catchpoint,
804 (int (*) (int))
805 return_one);
806 de_fault (to_remove_exec_catchpoint,
807 (int (*) (int))
808 return_one);
809 de_fault (to_set_syscall_catchpoint,
810 (int (*) (int, int, int, int, int *))
811 return_one);
812 de_fault (to_has_exited,
813 (int (*) (int, int, int *))
814 return_zero);
815 de_fault (to_can_run,
816 return_zero);
817 de_fault (to_extra_thread_info,
818 (char *(*) (struct thread_info *))
819 return_zero);
820 de_fault (to_thread_name,
821 (char *(*) (struct thread_info *))
822 return_zero);
823 de_fault (to_stop,
824 (void (*) (ptid_t))
825 target_ignore);
826 current_target.to_xfer_partial = current_xfer_partial;
827 de_fault (to_rcmd,
828 (void (*) (char *, struct ui_file *))
829 tcomplain);
830 de_fault (to_pid_to_exec_file,
831 (char *(*) (int))
832 return_zero);
833 de_fault (to_async,
834 (void (*) (void (*) (enum inferior_event_type, void*), void*))
835 tcomplain);
836 de_fault (to_thread_architecture,
837 default_thread_architecture);
838 current_target.to_read_description = NULL;
839 de_fault (to_get_ada_task_ptid,
840 (ptid_t (*) (long, long))
841 default_get_ada_task_ptid);
842 de_fault (to_supports_multi_process,
843 (int (*) (void))
844 return_zero);
845 de_fault (to_supports_enable_disable_tracepoint,
846 (int (*) (void))
847 return_zero);
848 de_fault (to_supports_string_tracing,
849 (int (*) (void))
850 return_zero);
851 de_fault (to_trace_init,
852 (void (*) (void))
853 tcomplain);
854 de_fault (to_download_tracepoint,
855 (void (*) (struct bp_location *))
856 tcomplain);
857 de_fault (to_can_download_tracepoint,
858 (int (*) (void))
859 return_zero);
860 de_fault (to_download_trace_state_variable,
861 (void (*) (struct trace_state_variable *))
862 tcomplain);
863 de_fault (to_enable_tracepoint,
864 (void (*) (struct bp_location *))
865 tcomplain);
866 de_fault (to_disable_tracepoint,
867 (void (*) (struct bp_location *))
868 tcomplain);
869 de_fault (to_trace_set_readonly_regions,
870 (void (*) (void))
871 tcomplain);
872 de_fault (to_trace_start,
873 (void (*) (void))
874 tcomplain);
875 de_fault (to_get_trace_status,
876 (int (*) (struct trace_status *))
877 return_minus_one);
878 de_fault (to_get_tracepoint_status,
879 (void (*) (struct breakpoint *, struct uploaded_tp *))
880 tcomplain);
881 de_fault (to_trace_stop,
882 (void (*) (void))
883 tcomplain);
884 de_fault (to_trace_find,
885 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
886 return_minus_one);
887 de_fault (to_get_trace_state_variable_value,
888 (int (*) (int, LONGEST *))
889 return_zero);
890 de_fault (to_save_trace_data,
891 (int (*) (const char *))
892 tcomplain);
893 de_fault (to_upload_tracepoints,
894 (int (*) (struct uploaded_tp **))
895 return_zero);
896 de_fault (to_upload_trace_state_variables,
897 (int (*) (struct uploaded_tsv **))
898 return_zero);
899 de_fault (to_get_raw_trace_data,
900 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
901 tcomplain);
902 de_fault (to_get_min_fast_tracepoint_insn_len,
903 (int (*) (void))
904 return_minus_one);
905 de_fault (to_set_disconnected_tracing,
906 (void (*) (int))
907 target_ignore);
908 de_fault (to_set_circular_trace_buffer,
909 (void (*) (int))
910 target_ignore);
911 de_fault (to_set_trace_notes,
912 (int (*) (char *, char *, char *))
913 return_zero);
914 de_fault (to_get_tib_address,
915 (int (*) (ptid_t, CORE_ADDR *))
916 tcomplain);
917 de_fault (to_set_permissions,
918 (void (*) (void))
919 target_ignore);
920 de_fault (to_static_tracepoint_marker_at,
921 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
922 return_zero);
923 de_fault (to_static_tracepoint_markers_by_strid,
924 (VEC(static_tracepoint_marker_p) * (*) (const char *))
925 tcomplain);
926 de_fault (to_traceframe_info,
927 (struct traceframe_info * (*) (void))
928 tcomplain);
929 de_fault (to_supports_evaluation_of_breakpoint_conditions,
930 (int (*) (void))
931 return_zero);
932 de_fault (to_execution_direction, default_execution_direction);
933
934 #undef de_fault
935
936 /* Finally, position the target-stack beneath the squashed
937 "current_target". That way code looking for a non-inherited
938 target method can quickly and simply find it. */
939 current_target.beneath = target_stack;
940
941 if (targetdebug)
942 setup_target_debug ();
943 }
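/* Editorial sketch, not part of GDB's target.c: the INHERIT/de_fault
   pair above implements a poor man's class inheritance -- copy the
   first non-NULL implementation found while walking down the stack,
   then plug any remaining holes with defaults.  A compact stand-alone
   model of the same idea; every name below is hypothetical.  Kept
   under #if 0 so it does not affect the build.  */
#if 0
struct iface
{
  struct iface *beneath;
  int (*get_id) (void);
  const char *(*get_name) (void);
};

static int default_get_id (void) { return -1; }
static const char *default_get_name (void) { return "<unknown>"; }

/* Flatten STACK into RESULT, inheriting each slot from the topmost
   layer that provides it, and defaulting whatever is still NULL.  */

static void
flatten (struct iface *result, struct iface *stack)
{
  struct iface *t;

  result->get_id = NULL;
  result->get_name = NULL;

#define INHERIT_SLOT(FIELD, T) \
  if (result->FIELD == NULL)   \
    result->FIELD = (T)->FIELD

  for (t = stack; t != NULL; t = t->beneath)
    {
      INHERIT_SLOT (get_id, t);
      INHERIT_SLOT (get_name, t);
    }
#undef INHERIT_SLOT

#define DE_FAULT(FIELD, VALUE) \
  if (result->FIELD == NULL)   \
    result->FIELD = VALUE

  DE_FAULT (get_id, default_get_id);
  DE_FAULT (get_name, default_get_name);
#undef DE_FAULT
}
#endif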
944
945 /* Push a new target type into the stack of the existing target accessors,
946 possibly superseding some of the existing accessors.
947
948 Rather than allow an empty stack, we always have the dummy target at
949 the bottom stratum, so we can call the function vectors without
950 checking them. */
951
952 void
953 push_target (struct target_ops *t)
954 {
955 struct target_ops **cur;
956
957 /* Check magic number. If wrong, it probably means someone changed
958 the struct definition, but not all the places that initialize one. */
959 if (t->to_magic != OPS_MAGIC)
960 {
961 fprintf_unfiltered (gdb_stderr,
962 "Magic number of %s target struct wrong\n",
963 t->to_shortname);
964 internal_error (__FILE__, __LINE__,
965 _("failed internal consistency check"));
966 }
967
968 /* Find the proper stratum to install this target in. */
969 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
970 {
971 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
972 break;
973 }
974
975 /* If there are already targets at this stratum, remove them. */
976 /* FIXME: cagney/2003-10-15: I think this should be popping all
977 targets to CUR, and not just those at this stratum level. */
978 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
979 {
980 /* There's already something at this stratum level. Close it,
981 and un-hook it from the stack. */
982 struct target_ops *tmp = (*cur);
983
984 (*cur) = (*cur)->beneath;
985 tmp->beneath = NULL;
986 target_close (tmp, 0);
987 }
988
989 /* We have removed all targets in our stratum, now add the new one. */
990 t->beneath = (*cur);
991 (*cur) = t;
992
993 update_current_target ();
994 }
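/* Editorial sketch, not part of GDB's target.c: push_target above
   inserts into a singly linked list kept sorted by stratum (highest
   first), closing and unlinking any existing entry at the same
   stratum.  The stand-alone model below shows just the list surgery,
   with hypothetical names; kept under #if 0.  */
#if 0
struct layer
{
  int stratum;              /* Higher value == closer to the top.  */
  struct layer *beneath;
};

static void
push_layer (struct layer **stack, struct layer *t)
{
  struct layer **cur;

  /* Find the first position whose stratum is not above T's.  */
  for (cur = stack; *cur != NULL; cur = &(*cur)->beneath)
    if (t->stratum >= (*cur)->stratum)
      break;

  /* Unlink any entries already at this stratum (the real code also
     closes them with target_close).  */
  while (*cur != NULL && (*cur)->stratum == t->stratum)
    {
      struct layer *tmp = *cur;

      *cur = tmp->beneath;
      tmp->beneath = NULL;
    }

  /* Link T in at the chosen position.  */
  t->beneath = *cur;
  *cur = t;
}
#endif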
995
996 /* Remove a target_ops vector from the stack, wherever it may be.
997 Return how many times it was removed (0 or 1). */
998
999 int
1000 unpush_target (struct target_ops *t)
1001 {
1002 struct target_ops **cur;
1003 struct target_ops *tmp;
1004
1005 if (t->to_stratum == dummy_stratum)
1006 internal_error (__FILE__, __LINE__,
1007 _("Attempt to unpush the dummy target"));
1008
1009 /* Look for the specified target. Note that we assume that a target
1010 can only occur once in the target stack. */
1011
1012 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1013 {
1014 if ((*cur) == t)
1015 break;
1016 }
1017
1018 /* If we don't find target_ops, quit. Only open targets should be
1019 closed. */
1020 if ((*cur) == NULL)
1021 return 0;
1022
1023 /* Unchain the target. */
1024 tmp = (*cur);
1025 (*cur) = (*cur)->beneath;
1026 tmp->beneath = NULL;
1027
1028 update_current_target ();
1029
1030 /* Finally close the target. Note we do this after unchaining, so
1031 any target method calls from within the target_close
1032 implementation don't end up in T anymore. */
1033 target_close (t, 0);
1034
1035 return 1;
1036 }
1037
1038 void
1039 pop_target (void)
1040 {
1041 target_close (target_stack, 0); /* Let it clean up. */
1042 if (unpush_target (target_stack) == 1)
1043 return;
1044
1045 fprintf_unfiltered (gdb_stderr,
1046 "pop_target couldn't find target %s\n",
1047 current_target.to_shortname);
1048 internal_error (__FILE__, __LINE__,
1049 _("failed internal consistency check"));
1050 }
1051
1052 void
1053 pop_all_targets_above (enum strata above_stratum, int quitting)
1054 {
1055 while ((int) (current_target.to_stratum) > (int) above_stratum)
1056 {
1057 target_close (target_stack, quitting);
1058 if (!unpush_target (target_stack))
1059 {
1060 fprintf_unfiltered (gdb_stderr,
1061 "pop_all_targets couldn't find target %s\n",
1062 target_stack->to_shortname);
1063 internal_error (__FILE__, __LINE__,
1064 _("failed internal consistency check"));
1065 break;
1066 }
1067 }
1068 }
1069
1070 void
1071 pop_all_targets (int quitting)
1072 {
1073 pop_all_targets_above (dummy_stratum, quitting);
1074 }
1075
1076 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1077
1078 int
1079 target_is_pushed (struct target_ops *t)
1080 {
1081 struct target_ops **cur;
1082
1083 /* Check magic number. If wrong, it probably means someone changed
1084 the struct definition, but not all the places that initialize one. */
1085 if (t->to_magic != OPS_MAGIC)
1086 {
1087 fprintf_unfiltered (gdb_stderr,
1088 "Magic number of %s target struct wrong\n",
1089 t->to_shortname);
1090 internal_error (__FILE__, __LINE__,
1091 _("failed internal consistency check"));
1092 }
1093
1094 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1095 if (*cur == t)
1096 return 1;
1097
1098 return 0;
1099 }
1100
1101 /* Using the objfile specified in OBJFILE, find the address for the
1102 current thread's thread-local storage with offset OFFSET. */
1103 CORE_ADDR
1104 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1105 {
1106 volatile CORE_ADDR addr = 0;
1107 struct target_ops *target;
1108
1109 for (target = current_target.beneath;
1110 target != NULL;
1111 target = target->beneath)
1112 {
1113 if (target->to_get_thread_local_address != NULL)
1114 break;
1115 }
1116
1117 if (target != NULL
1118 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
1119 {
1120 ptid_t ptid = inferior_ptid;
1121 volatile struct gdb_exception ex;
1122
1123 TRY_CATCH (ex, RETURN_MASK_ALL)
1124 {
1125 CORE_ADDR lm_addr;
1126
1127 /* Fetch the load module address for this objfile. */
1128 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1129 objfile);
1130 /* If it's 0, throw the appropriate exception. */
1131 if (lm_addr == 0)
1132 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1133 _("TLS load module not found"));
1134
1135 addr = target->to_get_thread_local_address (target, ptid,
1136 lm_addr, offset);
1137 }
1138 /* If an error occurred, print TLS related messages here. Otherwise,
1139 throw the error to some higher catcher. */
1140 if (ex.reason < 0)
1141 {
1142 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1143
1144 switch (ex.error)
1145 {
1146 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1147 error (_("Cannot find thread-local variables "
1148 "in this thread library."));
1149 break;
1150 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1151 if (objfile_is_library)
1152 error (_("Cannot find shared library `%s' in dynamic"
1153 " linker's load module list"), objfile->name);
1154 else
1155 error (_("Cannot find executable file `%s' in dynamic"
1156 " linker's load module list"), objfile->name);
1157 break;
1158 case TLS_NOT_ALLOCATED_YET_ERROR:
1159 if (objfile_is_library)
1160 error (_("The inferior has not yet allocated storage for"
1161 " thread-local variables in\n"
1162 "the shared library `%s'\n"
1163 "for %s"),
1164 objfile->name, target_pid_to_str (ptid));
1165 else
1166 error (_("The inferior has not yet allocated storage for"
1167 " thread-local variables in\n"
1168 "the executable `%s'\n"
1169 "for %s"),
1170 objfile->name, target_pid_to_str (ptid));
1171 break;
1172 case TLS_GENERIC_ERROR:
1173 if (objfile_is_library)
1174 error (_("Cannot find thread-local storage for %s, "
1175 "shared library %s:\n%s"),
1176 target_pid_to_str (ptid),
1177 objfile->name, ex.message);
1178 else
1179 error (_("Cannot find thread-local storage for %s, "
1180 "executable file %s:\n%s"),
1181 target_pid_to_str (ptid),
1182 objfile->name, ex.message);
1183 break;
1184 default:
1185 throw_exception (ex);
1186 break;
1187 }
1188 }
1189 }
1190 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1191 TLS is an ABI-specific thing. But we don't do that yet. */
1192 else
1193 error (_("Cannot find thread-local variables on this target"));
1194
1195 return addr;
1196 }
1197
1198 #undef MIN
1199 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1200
1201 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1202 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1203 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1204 is responsible for freeing it. Return the number of bytes successfully
1205 read. */
1206
1207 int
1208 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1209 {
1210 int tlen, origlen, offset, i;
1211 gdb_byte buf[4];
1212 int errcode = 0;
1213 char *buffer;
1214 int buffer_allocated;
1215 char *bufptr;
1216 unsigned int nbytes_read = 0;
1217
1218 gdb_assert (string);
1219
1220 /* Small for testing. */
1221 buffer_allocated = 4;
1222 buffer = xmalloc (buffer_allocated);
1223 bufptr = buffer;
1224
1225 origlen = len;
1226
1227 while (len > 0)
1228 {
1229 tlen = MIN (len, 4 - (memaddr & 3));
1230 offset = memaddr & 3;
1231
1232 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1233 if (errcode != 0)
1234 {
1235 /* The transfer request might have crossed the boundary to an
1236 unallocated region of memory. Retry the transfer, requesting
1237 a single byte. */
1238 tlen = 1;
1239 offset = 0;
1240 errcode = target_read_memory (memaddr, buf, 1);
1241 if (errcode != 0)
1242 goto done;
1243 }
1244
1245 if (bufptr - buffer + tlen > buffer_allocated)
1246 {
1247 unsigned int bytes;
1248
1249 bytes = bufptr - buffer;
1250 buffer_allocated *= 2;
1251 buffer = xrealloc (buffer, buffer_allocated);
1252 bufptr = buffer + bytes;
1253 }
1254
1255 for (i = 0; i < tlen; i++)
1256 {
1257 *bufptr++ = buf[i + offset];
1258 if (buf[i + offset] == '\000')
1259 {
1260 nbytes_read += i + 1;
1261 goto done;
1262 }
1263 }
1264
1265 memaddr += tlen;
1266 len -= tlen;
1267 nbytes_read += tlen;
1268 }
1269 done:
1270 *string = buffer;
1271 if (errnop != NULL)
1272 *errnop = errcode;
1273 return nbytes_read;
1274 }
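/* Editorial sketch, not part of GDB's target.c: how a caller might use
   target_read_string above.  ADDR is a hypothetical inferior address
   known to hold a C string; 200 is an arbitrary length cap.  The
   caller always owns the malloc'd buffer, even on error.  Kept under
   #if 0 so it does not affect the build.  */
#if 0
static void
example_read_inferior_string (CORE_ADDR addr)
{
  char *str = NULL;
  int err = 0;
  int nbytes = target_read_string (addr, &str, 200, &err);

  if (err == 0)
    /* Limit the print to the bytes actually read, in case no
       terminating NUL was found within the length cap.  */
    printf_unfiltered ("%.*s\n", nbytes, str);
  else
    printf_unfiltered (_("read failed after %d bytes (errno %d)\n"),
                       nbytes, err);

  xfree (str);
}
#endif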
1275
1276 struct target_section_table *
1277 target_get_section_table (struct target_ops *target)
1278 {
1279 struct target_ops *t;
1280
1281 if (targetdebug)
1282 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1283
1284 for (t = target; t != NULL; t = t->beneath)
1285 if (t->to_get_section_table != NULL)
1286 return (*t->to_get_section_table) (t);
1287
1288 return NULL;
1289 }
1290
1291 /* Find a section containing ADDR. */
1292
1293 struct target_section *
1294 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1295 {
1296 struct target_section_table *table = target_get_section_table (target);
1297 struct target_section *secp;
1298
1299 if (table == NULL)
1300 return NULL;
1301
1302 for (secp = table->sections; secp < table->sections_end; secp++)
1303 {
1304 if (addr >= secp->addr && addr < secp->endaddr)
1305 return secp;
1306 }
1307 return NULL;
1308 }
1309
1310 /* Read memory from the live target, even if currently inspecting a
1311 traceframe. The return is the same as that of target_read. */
1312
1313 static LONGEST
1314 target_read_live_memory (enum target_object object,
1315 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1316 {
1317 int ret;
1318 struct cleanup *cleanup;
1319
1320 /* Switch momentarily out of tfind mode so as to access live memory.
1321 Note that this must not clear global state, such as the frame
1322 cache, which must still remain valid for the previous traceframe.
1323 We may be _building_ the frame cache at this point. */
1324 cleanup = make_cleanup_restore_traceframe_number ();
1325 set_traceframe_number (-1);
1326
1327 ret = target_read (current_target.beneath, object, NULL,
1328 myaddr, memaddr, len);
1329
1330 do_cleanups (cleanup);
1331 return ret;
1332 }
1333
1334 /* Using the set of read-only target sections of OPS, read live
1335 read-only memory. Note that the actual reads start from the
1336 top-most target again.
1337
1338 For interface/parameters/return description see target.h,
1339 to_xfer_partial. */
1340
1341 static LONGEST
1342 memory_xfer_live_readonly_partial (struct target_ops *ops,
1343 enum target_object object,
1344 gdb_byte *readbuf, ULONGEST memaddr,
1345 LONGEST len)
1346 {
1347 struct target_section *secp;
1348 struct target_section_table *table;
1349
1350 secp = target_section_by_addr (ops, memaddr);
1351 if (secp != NULL
1352 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1353 & SEC_READONLY))
1354 {
1355 struct target_section *p;
1356 ULONGEST memend = memaddr + len;
1357
1358 table = target_get_section_table (ops);
1359
1360 for (p = table->sections; p < table->sections_end; p++)
1361 {
1362 if (memaddr >= p->addr)
1363 {
1364 if (memend <= p->endaddr)
1365 {
1366 /* Entire transfer is within this section. */
1367 return target_read_live_memory (object, memaddr,
1368 readbuf, len);
1369 }
1370 else if (memaddr >= p->endaddr)
1371 {
1372 /* This section ends before the transfer starts. */
1373 continue;
1374 }
1375 else
1376 {
1377 /* This section overlaps the transfer. Just do half. */
1378 len = p->endaddr - memaddr;
1379 return target_read_live_memory (object, memaddr,
1380 readbuf, len);
1381 }
1382 }
1383 }
1384 }
1385
1386 return 0;
1387 }
1388
1389 /* Perform a partial memory transfer.
1390 For docs see target.h, to_xfer_partial. */
1391
1392 static LONGEST
1393 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1394 void *readbuf, const void *writebuf, ULONGEST memaddr,
1395 LONGEST len)
1396 {
1397 LONGEST res;
1398 int reg_len;
1399 struct mem_region *region;
1400 struct inferior *inf;
1401
1402 /* For accesses to unmapped overlay sections, read directly from
1403 files. Must do this first, as MEMADDR may need adjustment. */
1404 if (readbuf != NULL && overlay_debugging)
1405 {
1406 struct obj_section *section = find_pc_overlay (memaddr);
1407
1408 if (pc_in_unmapped_range (memaddr, section))
1409 {
1410 struct target_section_table *table
1411 = target_get_section_table (ops);
1412 const char *section_name = section->the_bfd_section->name;
1413
1414 memaddr = overlay_mapped_address (memaddr, section);
1415 return section_table_xfer_memory_partial (readbuf, writebuf,
1416 memaddr, len,
1417 table->sections,
1418 table->sections_end,
1419 section_name);
1420 }
1421 }
1422
1423 /* Try the executable files, if "trust-readonly-sections" is set. */
1424 if (readbuf != NULL && trust_readonly)
1425 {
1426 struct target_section *secp;
1427 struct target_section_table *table;
1428
1429 secp = target_section_by_addr (ops, memaddr);
1430 if (secp != NULL
1431 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1432 & SEC_READONLY))
1433 {
1434 table = target_get_section_table (ops);
1435 return section_table_xfer_memory_partial (readbuf, writebuf,
1436 memaddr, len,
1437 table->sections,
1438 table->sections_end,
1439 NULL);
1440 }
1441 }
1442
1443 /* If reading unavailable memory in the context of traceframes, and
1444 this address falls within a read-only section, fallback to
1445 reading from live memory. */
1446 if (readbuf != NULL && get_traceframe_number () != -1)
1447 {
1448 VEC(mem_range_s) *available;
1449
1450 /* If we fail to get the set of available memory, then the
1451 target does not support querying traceframe info, and so we
1452 attempt reading from the traceframe anyway (assuming the
1453 target implements the old QTro packet then). */
1454 if (traceframe_available_memory (&available, memaddr, len))
1455 {
1456 struct cleanup *old_chain;
1457
1458 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1459
1460 if (VEC_empty (mem_range_s, available)
1461 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1462 {
1463 /* Don't read into the traceframe's available
1464 memory. */
1465 if (!VEC_empty (mem_range_s, available))
1466 {
1467 LONGEST oldlen = len;
1468
1469 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1470 gdb_assert (len <= oldlen);
1471 }
1472
1473 do_cleanups (old_chain);
1474
1475 /* This goes through the topmost target again. */
1476 res = memory_xfer_live_readonly_partial (ops, object,
1477 readbuf, memaddr, len);
1478 if (res > 0)
1479 return res;
1480
1481 /* No use trying further, we know some memory starting
1482 at MEMADDR isn't available. */
1483 return -1;
1484 }
1485
1486 /* Don't try to read more than how much is available, in
1487 case the target implements the deprecated QTro packet to
1488 cater for older GDBs (the target's knowledge of read-only
1489 sections may be outdated by now). */
1490 len = VEC_index (mem_range_s, available, 0)->length;
1491
1492 do_cleanups (old_chain);
1493 }
1494 }
1495
1496 /* Try GDB's internal data cache. */
1497 region = lookup_mem_region (memaddr);
1498 /* region->hi == 0 means there's no upper bound. */
1499 if (memaddr + len < region->hi || region->hi == 0)
1500 reg_len = len;
1501 else
1502 reg_len = region->hi - memaddr;
1503
1504 switch (region->attrib.mode)
1505 {
1506 case MEM_RO:
1507 if (writebuf != NULL)
1508 return -1;
1509 break;
1510
1511 case MEM_WO:
1512 if (readbuf != NULL)
1513 return -1;
1514 break;
1515
1516 case MEM_FLASH:
1517 /* We only support writing to flash during "load" for now. */
1518 if (writebuf != NULL)
1519 error (_("Writing to flash memory forbidden in this context"));
1520 break;
1521
1522 case MEM_NONE:
1523 return -1;
1524 }
1525
1526 if (!ptid_equal (inferior_ptid, null_ptid))
1527 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1528 else
1529 inf = NULL;
1530
1531 if (inf != NULL
1532 /* The dcache reads whole cache lines; that doesn't play well
1533 with reading from a trace buffer, because reading outside of
1534 the collected memory range fails. */
1535 && get_traceframe_number () == -1
1536 && (region->attrib.cache
1537 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1538 {
1539 if (readbuf != NULL)
1540 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1541 reg_len, 0);
1542 else
1543 /* FIXME drow/2006-08-09: If we're going to preserve const
1544 correctness dcache_xfer_memory should take readbuf and
1545 writebuf. */
1546 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1547 (void *) writebuf,
1548 reg_len, 1);
1549 if (res <= 0)
1550 return -1;
1551 else
1552 return res;
1553 }
1554
1555 /* If none of those methods found the memory we wanted, fall back
1556 to a target partial transfer. Normally a single call to
1557 to_xfer_partial is enough; if it doesn't recognize an object
1558 it will call the to_xfer_partial of the next target down.
1559 But for memory this won't do. Memory is the only target
1560 object which can be read from more than one valid target.
1561 A core file, for instance, could have some of memory but
1562 delegate other bits to the target below it. So, we must
1563 manually try all targets. */
1564
1565 do
1566 {
1567 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1568 readbuf, writebuf, memaddr, reg_len);
1569 if (res > 0)
1570 break;
1571
1572 /* We want to continue past core files to executables, but not
1573 past a running target's memory. */
1574 if (ops->to_has_all_memory (ops))
1575 break;
1576
1577 ops = ops->beneath;
1578 }
1579 while (ops != NULL);
1580
1581 /* If we are writing to the stack, make sure the cache gets updated
1582 no matter what: even if this write is not tagged as a stack access,
1583 we still need to update the cache. */
1584
1585 if (res > 0
1586 && inf != NULL
1587 && writebuf != NULL
1588 && !region->attrib.cache
1589 && stack_cache_enabled_p
1590 && object != TARGET_OBJECT_STACK_MEMORY)
1591 {
1592 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1593 }
1594
1595 /* If we still haven't got anything, return the last error. We
1596 give up. */
1597 return res;
1598 }
1599
1600 /* Perform a partial memory transfer. For docs see target.h,
1601 to_xfer_partial. */
1602
1603 static LONGEST
1604 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1605 void *readbuf, const void *writebuf, ULONGEST memaddr,
1606 LONGEST len)
1607 {
1608 int res;
1609
1610 /* Zero length requests are ok and require no work. */
1611 if (len == 0)
1612 return 0;
1613
1614 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1615 breakpoint insns, thus hiding out from higher layers whether
1616 there are software breakpoints inserted in the code stream. */
1617 if (readbuf != NULL)
1618 {
1619 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1620
1621 if (res > 0 && !show_memory_breakpoints)
1622 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1623 }
1624 else
1625 {
1626 void *buf;
1627 struct cleanup *old_chain;
1628
1629 buf = xmalloc (len);
1630 old_chain = make_cleanup (xfree, buf);
1631 memcpy (buf, writebuf, len);
1632
1633 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1634 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1635
1636 do_cleanups (old_chain);
1637 }
1638
1639 return res;
1640 }
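/* Editorial sketch, not part of GDB's target.c: the calls to
   breakpoint_xfer_memory above hide software breakpoints by patching
   the transfer buffer.  The stand-alone model below shows the
   read-side fixup only: wherever a saved "shadow" overlaps the read
   range, the original instruction bytes replace the breakpoint bytes.
   All names are hypothetical; kept under #if 0.  */
#if 0
#include <string.h>

struct shadow
{
  unsigned long addr;            /* Address the shadow covers.  */
  unsigned long len;             /* Number of shadowed bytes.  */
  const unsigned char *saved;    /* Original instruction bytes.  */
};

static void
hide_breakpoints (unsigned char *readbuf, unsigned long memaddr,
                  unsigned long len, const struct shadow *shadows,
                  int nshadows)
{
  int i;

  for (i = 0; i < nshadows; i++)
    {
      unsigned long lo = shadows[i].addr;
      unsigned long hi = lo + shadows[i].len;
      unsigned long start = memaddr > lo ? memaddr : lo;
      unsigned long end = memaddr + len < hi ? memaddr + len : hi;

      if (start < end)
        memcpy (readbuf + (start - memaddr),
                shadows[i].saved + (start - lo),
                end - start);
    }
}
#endif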
1641
1642 static void
1643 restore_show_memory_breakpoints (void *arg)
1644 {
1645 show_memory_breakpoints = (uintptr_t) arg;
1646 }
1647
1648 struct cleanup *
1649 make_show_memory_breakpoints_cleanup (int show)
1650 {
1651 int current = show_memory_breakpoints;
1652
1653 show_memory_breakpoints = show;
1654 return make_cleanup (restore_show_memory_breakpoints,
1655 (void *) (uintptr_t) current);
1656 }
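/* Editorial sketch, not part of GDB's target.c: typical use of the
   cleanup above -- temporarily request true memory contents (with
   breakpoint instructions visible), then restore the previous setting
   via do_cleanups.  The function name and its callers are
   hypothetical; kept under #if 0 so it does not affect the build.  */
#if 0
static int
example_read_showing_breakpoints (CORE_ADDR addr, gdb_byte *buf, int len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int err = target_read_memory (addr, buf, len);

  do_cleanups (old_chain);
  return err;
}
#endif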
1657
1658 /* For docs see target.h, to_xfer_partial. */
1659
1660 static LONGEST
1661 target_xfer_partial (struct target_ops *ops,
1662 enum target_object object, const char *annex,
1663 void *readbuf, const void *writebuf,
1664 ULONGEST offset, LONGEST len)
1665 {
1666 LONGEST retval;
1667
1668 gdb_assert (ops->to_xfer_partial != NULL);
1669
1670 if (writebuf && !may_write_memory)
1671 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1672 core_addr_to_string_nz (offset), plongest (len));
1673
1674 /* If this is a memory transfer, let the memory-specific code
1675 have a look at it instead. Memory transfers are more
1676 complicated. */
1677 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1678 retval = memory_xfer_partial (ops, object, readbuf,
1679 writebuf, offset, len);
1680 else
1681 {
1682 enum target_object raw_object = object;
1683
1684 /* If this is a raw memory transfer, request the normal
1685 memory object from other layers. */
1686 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1687 raw_object = TARGET_OBJECT_MEMORY;
1688
1689 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1690 writebuf, offset, len);
1691 }
1692
1693 if (targetdebug)
1694 {
1695 const unsigned char *myaddr = NULL;
1696
1697 fprintf_unfiltered (gdb_stdlog,
1698 "%s:target_xfer_partial "
1699 "(%d, %s, %s, %s, %s, %s) = %s",
1700 ops->to_shortname,
1701 (int) object,
1702 (annex ? annex : "(null)"),
1703 host_address_to_string (readbuf),
1704 host_address_to_string (writebuf),
1705 core_addr_to_string_nz (offset),
1706 plongest (len), plongest (retval));
1707
1708 if (readbuf)
1709 myaddr = readbuf;
1710 if (writebuf)
1711 myaddr = writebuf;
1712 if (retval > 0 && myaddr != NULL)
1713 {
1714 int i;
1715
1716 fputs_unfiltered (", bytes =", gdb_stdlog);
1717 for (i = 0; i < retval; i++)
1718 {
1719 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1720 {
1721 if (targetdebug < 2 && i > 0)
1722 {
1723 fprintf_unfiltered (gdb_stdlog, " ...");
1724 break;
1725 }
1726 fprintf_unfiltered (gdb_stdlog, "\n");
1727 }
1728
1729 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1730 }
1731 }
1732
1733 fputc_unfiltered ('\n', gdb_stdlog);
1734 }
1735 return retval;
1736 }
1737
1738 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1739 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1740 if any error occurs.
1741
1742 If an error occurs, no guarantee is made about the contents of the data at
1743 MYADDR. In particular, the caller should not depend upon partial reads
1744 filling the buffer with good data. There is no way for the caller to know
1745 how much good data might have been transferred anyway. Callers that can
1746 deal with partial reads should call target_read (which will retry until
1747 it makes no progress, and then return how much was transferred). */
1748
1749 int
1750 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1751 {
1752 /* Dispatch to the topmost target, not the flattened current_target.
1753 Memory accesses check target->to_has_(all_)memory, and the
1754 flattened target doesn't inherit those. */
1755 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1756 myaddr, memaddr, len) == len)
1757 return 0;
1758 else
1759 return EIO;
1760 }
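/* Editorial sketch, not part of GDB's target.c: the contract documented
   above -- 0 on success, an errno value (here EIO) on failure, with no
   guarantee about partial results.  Callers that can cope with partial
   reads use target_read instead.  ADDR is hypothetical; kept under
   #if 0 so it does not affect the build.  */
#if 0
static void
example_fetch_word (CORE_ADDR addr)
{
  gdb_byte buf[4];

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    error (_("cannot read %s"), core_addr_to_string_nz (addr));

  /* All four bytes are valid here; interpret them as needed.  */
}
#endif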
1761
1762 /* Like target_read_memory, but specify explicitly that this is a read from
1763 the target's stack. This may trigger different cache behavior. */
1764
1765 int
1766 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1767 {
1768 /* Dispatch to the topmost target, not the flattened current_target.
1769 Memory accesses check target->to_has_(all_)memory, and the
1770 flattened target doesn't inherit those. */
1771
1772 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1773 myaddr, memaddr, len) == len)
1774 return 0;
1775 else
1776 return EIO;
1777 }
1778
1779 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1780 Returns either 0 for success or an errno value if any error occurs.
1781 If an error occurs, no guarantee is made about how much data got written.
1782 Callers that can deal with partial writes should call target_write. */
1783
1784 int
1785 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1786 {
1787 /* Dispatch to the topmost target, not the flattened current_target.
1788 Memory accesses check target->to_has_(all_)memory, and the
1789 flattened target doesn't inherit those. */
1790 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1791 myaddr, memaddr, len) == len)
1792 return 0;
1793 else
1794 return EIO;
1795 }
1796
1797 /* Write LEN bytes from MYADDR to target raw memory at address
1798 MEMADDR. Returns either 0 for success or an errno value if any
1799 error occurs. If an error occurs, no guarantee is made about how
1800 much data got written. Callers that can deal with partial writes
1801 should call target_write. */
1802
1803 int
1804 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1805 {
1806 /* Dispatch to the topmost target, not the flattened current_target.
1807 Memory accesses check target->to_has_(all_)memory, and the
1808 flattened target doesn't inherit those. */
1809 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1810 myaddr, memaddr, len) == len)
1811 return 0;
1812 else
1813 return EIO;
1814 }
1815
1816 /* Fetch the target's memory map. */
1817
1818 VEC(mem_region_s) *
1819 target_memory_map (void)
1820 {
1821 VEC(mem_region_s) *result;
1822 struct mem_region *last_one, *this_one;
1823 int ix;
1824 struct target_ops *t;
1825
1826 if (targetdebug)
1827 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1828
1829 for (t = current_target.beneath; t != NULL; t = t->beneath)
1830 if (t->to_memory_map != NULL)
1831 break;
1832
1833 if (t == NULL)
1834 return NULL;
1835
1836 result = t->to_memory_map (t);
1837 if (result == NULL)
1838 return NULL;
1839
1840 qsort (VEC_address (mem_region_s, result),
1841 VEC_length (mem_region_s, result),
1842 sizeof (struct mem_region), mem_region_cmp);
1843
1844 /* Check that regions do not overlap. Simultaneously assign
1845 a numbering for the "mem" commands to use to refer to
1846 each region. */
1847 last_one = NULL;
1848 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1849 {
1850 this_one->number = ix;
1851
1852 if (last_one && last_one->hi > this_one->lo)
1853 {
1854 warning (_("Overlapping regions in memory map: ignoring"));
1855 VEC_free (mem_region_s, result);
1856 return NULL;
1857 }
1858 last_one = this_one;
1859 }
1860
1861 return result;
1862 }
1863
1864 void
1865 target_flash_erase (ULONGEST address, LONGEST length)
1866 {
1867 struct target_ops *t;
1868
1869 for (t = current_target.beneath; t != NULL; t = t->beneath)
1870 if (t->to_flash_erase != NULL)
1871 {
1872 if (targetdebug)
1873 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1874 hex_string (address), phex (length, 0));
1875 t->to_flash_erase (t, address, length);
1876 return;
1877 }
1878
1879 tcomplain ();
1880 }
1881
1882 void
1883 target_flash_done (void)
1884 {
1885 struct target_ops *t;
1886
1887 for (t = current_target.beneath; t != NULL; t = t->beneath)
1888 if (t->to_flash_done != NULL)
1889 {
1890 if (targetdebug)
1891 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1892 t->to_flash_done (t);
1893 return;
1894 }
1895
1896 tcomplain ();
1897 }
1898
1899 static void
1900 show_trust_readonly (struct ui_file *file, int from_tty,
1901 struct cmd_list_element *c, const char *value)
1902 {
1903 fprintf_filtered (file,
1904 _("Mode for reading from readonly sections is %s.\n"),
1905 value);
1906 }
1907
1908 /* More generic transfers. */
1909
1910 static LONGEST
1911 default_xfer_partial (struct target_ops *ops, enum target_object object,
1912 const char *annex, gdb_byte *readbuf,
1913 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1914 {
1915 if (object == TARGET_OBJECT_MEMORY
1916 && ops->deprecated_xfer_memory != NULL)
1917 /* If available, fall back to the target's
1918 "deprecated_xfer_memory" method. */
1919 {
1920 int xfered = -1;
1921
1922 errno = 0;
1923 if (writebuf != NULL)
1924 {
1925 void *buffer = xmalloc (len);
1926 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1927
1928 memcpy (buffer, writebuf, len);
1929 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1930 1/*write*/, NULL, ops);
1931 do_cleanups (cleanup);
1932 }
1933 if (readbuf != NULL)
1934 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1935 0/*read*/, NULL, ops);
1936 if (xfered > 0)
1937 return xfered;
1938 else if (xfered == 0 && errno == 0)
1939 /* "deprecated_xfer_memory" uses 0, cross checked against
1940 ERRNO as one indication of an error. */
1941 return 0;
1942 else
1943 return -1;
1944 }
1945 else if (ops->beneath != NULL)
1946 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1947 readbuf, writebuf, offset, len);
1948 else
1949 return -1;
1950 }
1951
1952 /* The xfer_partial handler for the topmost target. Unlike the default,
1953 it does not need to handle memory specially; it just passes all
1954 requests down the stack. */
1955
1956 static LONGEST
1957 current_xfer_partial (struct target_ops *ops, enum target_object object,
1958 const char *annex, gdb_byte *readbuf,
1959 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1960 {
1961 if (ops->beneath != NULL)
1962 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1963 readbuf, writebuf, offset, len);
1964 else
1965 return -1;
1966 }
1967
1968 /* Target vector read/write partial wrapper functions. */
1969
1970 static LONGEST
1971 target_read_partial (struct target_ops *ops,
1972 enum target_object object,
1973 const char *annex, gdb_byte *buf,
1974 ULONGEST offset, LONGEST len)
1975 {
1976 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1977 }
1978
1979 static LONGEST
1980 target_write_partial (struct target_ops *ops,
1981 enum target_object object,
1982 const char *annex, const gdb_byte *buf,
1983 ULONGEST offset, LONGEST len)
1984 {
1985 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1986 }
1987
1988 /* Wrappers to perform the full transfer. */
1989
1990 /* For docs on target_read see target.h. */
1991
1992 LONGEST
1993 target_read (struct target_ops *ops,
1994 enum target_object object,
1995 const char *annex, gdb_byte *buf,
1996 ULONGEST offset, LONGEST len)
1997 {
1998 LONGEST xfered = 0;
1999
2000 while (xfered < len)
2001 {
2002 LONGEST xfer = target_read_partial (ops, object, annex,
2003 (gdb_byte *) buf + xfered,
2004 offset + xfered, len - xfered);
2005
2006 /* Call an observer, notifying them of the xfer progress? */
2007 if (xfer == 0)
2008 return xfered;
2009 if (xfer < 0)
2010 return -1;
2011 xfered += xfer;
2012 QUIT;
2013 }
2014 return len;
2015 }
2016
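/* Illustrative usage sketch (not part of GDB, compiled out): a
   whole-buffer read through target_read.  Unlike target_read_partial, a
   short return here means the tail of the requested range really was
   unreadable, mirroring the target_write_memory wrapper near the top of
   this section.  */
#if 0
static int
example_read_block (CORE_ADDR addr, gdb_byte *buf, LONGEST len)
{
  LONGEST got = target_read (current_target.beneath, TARGET_OBJECT_MEMORY,
                             NULL, buf, addr, len);

  return got == len ? 0 : EIO;
}
#endif
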
2017 /* Assuming that the entire [begin, end) range of memory cannot be
2018 read, try to read whatever subrange is possible to read.
2019
2020 The function returns, in RESULT, either zero or one memory block.
2021 If there's a readable subrange at the beginning, it is completely
2022 read and returned. Any further readable subrange will not be read.
2023 Otherwise, if there's a readable subrange at the end, it will be
2024 completely read and returned. Any readable subranges before it
2025 (obviously, not starting at the beginning) will be ignored. In all
2026 other cases -- no readable subrange at all, or subrange(s) neither
2027 at the beginning nor at the end -- nothing is returned.
2028
2029 The purpose of this function is to handle a read across a boundary
2030 of accessible memory in the case when a memory map is not available.
2031 The above restrictions are fine for this case, but will give
2032 incorrect results if the memory is 'patchy'. However, supporting
2033 'patchy' memory would require trying to read every single byte,
2034 which seems an unacceptable solution. An explicit memory map is
2035 recommended for this case -- read_memory_robust will then take
2036 care of reading multiple ranges. */
2037
2038 static void
2039 read_whatever_is_readable (struct target_ops *ops,
2040 ULONGEST begin, ULONGEST end,
2041 VEC(memory_read_result_s) **result)
2042 {
2043 gdb_byte *buf = xmalloc (end - begin);
2044 ULONGEST current_begin = begin;
2045 ULONGEST current_end = end;
2046 int forward;
2047 memory_read_result_s r;
2048
2049 /* If the failed range is only 1 byte wide, it cannot be subdivided; give up. */
2050 if (end - begin <= 1)
2051 {
2052 xfree (buf);
2053 return;
2054 }
2055
2056 /* Check that either the first or the last byte is readable, and give
2057 up if not. This heuristic is meant to permit reading accessible
2058 memory at the boundary of an accessible region. */
2059 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2060 buf, begin, 1) == 1)
2061 {
2062 forward = 1;
2063 ++current_begin;
2064 }
2065 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2066 buf + (end-begin) - 1, end - 1, 1) == 1)
2067 {
2068 forward = 0;
2069 --current_end;
2070 }
2071 else
2072 {
2073 xfree (buf);
2074 return;
2075 }
2076
2077 /* The loop invariant is that [current_begin, current_end) was
2078 previously found to be not readable as a whole.
2079
2080 Note the loop condition -- if the range is down to 1 byte, we
2081 cannot divide it further, so there is no point in trying. */
2082 while (current_end - current_begin > 1)
2083 {
2084 ULONGEST first_half_begin, first_half_end;
2085 ULONGEST second_half_begin, second_half_end;
2086 LONGEST xfer;
2087 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2088
2089 if (forward)
2090 {
2091 first_half_begin = current_begin;
2092 first_half_end = middle;
2093 second_half_begin = middle;
2094 second_half_end = current_end;
2095 }
2096 else
2097 {
2098 first_half_begin = middle;
2099 first_half_end = current_end;
2100 second_half_begin = current_begin;
2101 second_half_end = middle;
2102 }
2103
2104 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2105 buf + (first_half_begin - begin),
2106 first_half_begin,
2107 first_half_end - first_half_begin);
2108
2109 if (xfer == first_half_end - first_half_begin)
2110 {
2111 /* This half reads fine, so the error must be in the
2112 other half. */
2113 current_begin = second_half_begin;
2114 current_end = second_half_end;
2115 }
2116 else
2117 {
2118 /* This half is not readable. Because we've tried one byte, we
2119 know some part of this half is actually readable. Go to the next
2120 iteration to divide again and try to read.
2121
2122 We don't handle the other half, because this function only tries
2123 to read a single readable subrange. */
2124 current_begin = first_half_begin;
2125 current_end = first_half_end;
2126 }
2127 }
2128
2129 if (forward)
2130 {
2131 /* The [begin, current_begin) range has been read. */
2132 r.begin = begin;
2133 r.end = current_begin;
2134 r.data = buf;
2135 }
2136 else
2137 {
2138 /* The [current_end, end) range has been read. */
2139 LONGEST rlen = end - current_end;
2140
2141 r.data = xmalloc (rlen);
2142 memcpy (r.data, buf + current_end - begin, rlen);
2143 r.begin = current_end;
2144 r.end = end;
2145 xfree (buf);
2146 }
2147 VEC_safe_push (memory_read_result_s, (*result), &r);
2148 }
2149
2150 void
2151 free_memory_read_result_vector (void *x)
2152 {
2153 VEC(memory_read_result_s) *v = x;
2154 memory_read_result_s *current;
2155 int ix;
2156
2157 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2158 {
2159 xfree (current->data);
2160 }
2161 VEC_free (memory_read_result_s, v);
2162 }
2163
2164 VEC(memory_read_result_s) *
2165 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2166 {
2167 VEC(memory_read_result_s) *result = 0;
2168
2169 LONGEST xfered = 0;
2170 while (xfered < len)
2171 {
2172 struct mem_region *region = lookup_mem_region (offset + xfered);
2173 LONGEST rlen;
2174
2175 /* If there is no explicit region, lookup_mem_region should have created a fake one. */
2176 gdb_assert (region);
2177
2178 if (region->hi == 0)
2179 rlen = len - xfered;
2180 else
2181 rlen = region->hi - offset;
2182
2183 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2184 {
2185 /* Cannot read this region. Note that we can end up here only
2186 if the region is explicitly marked inaccessible, or
2187 'inaccessible-by-default' is in effect. */
2188 xfered += rlen;
2189 }
2190 else
2191 {
2192 LONGEST to_read = min (len - xfered, rlen);
2193 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2194
2195 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2196 (gdb_byte *) buffer,
2197 offset + xfered, to_read);
2198 /* Call an observer, notifying them of the xfer progress? */
2199 if (xfer <= 0)
2200 {
2201 /* Got an error reading the full chunk. See if we can read
2202 some subrange. */
2203 xfree (buffer);
2204 read_whatever_is_readable (ops, offset + xfered,
2205 offset + xfered + to_read, &result);
2206 xfered += to_read;
2207 }
2208 else
2209 {
2210 struct memory_read_result r;
2211 r.data = buffer;
2212 r.begin = offset + xfered;
2213 r.end = r.begin + xfer;
2214 VEC_safe_push (memory_read_result_s, result, &r);
2215 xfered += xfer;
2216 }
2217 QUIT;
2218 }
2219 }
2220 return result;
2221 }
2222
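/* Illustrative usage sketch (not part of GDB, compiled out): consuming
   the vector produced by read_memory_robust and releasing it through the
   cleanup above.  The function name is hypothetical.  */
#if 0
static void
example_robust_read (CORE_ADDR addr, LONGEST len)
{
  VEC(memory_read_result_s) *chunks
    = read_memory_robust (current_target.beneath, addr, len);
  struct cleanup *old
    = make_cleanup (free_memory_read_result_vector, chunks);
  memory_read_result_s *r;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, r); ix++)
    printf_unfiltered ("readable: [%s, %s)\n",
                       hex_string (r->begin), hex_string (r->end));

  do_cleanups (old);
}
#endif
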
2223
2224 /* An alternative to target_write with progress callbacks. */
2225
2226 LONGEST
2227 target_write_with_progress (struct target_ops *ops,
2228 enum target_object object,
2229 const char *annex, const gdb_byte *buf,
2230 ULONGEST offset, LONGEST len,
2231 void (*progress) (ULONGEST, void *), void *baton)
2232 {
2233 LONGEST xfered = 0;
2234
2235 /* Give the progress callback a chance to set up. */
2236 if (progress)
2237 (*progress) (0, baton);
2238
2239 while (xfered < len)
2240 {
2241 LONGEST xfer = target_write_partial (ops, object, annex,
2242 (gdb_byte *) buf + xfered,
2243 offset + xfered, len - xfered);
2244
2245 if (xfer == 0)
2246 return xfered;
2247 if (xfer < 0)
2248 return -1;
2249
2250 if (progress)
2251 (*progress) (xfer, baton);
2252
2253 xfered += xfer;
2254 QUIT;
2255 }
2256 return len;
2257 }
2258
2259 /* For docs on target_write see target.h. */
2260
2261 LONGEST
2262 target_write (struct target_ops *ops,
2263 enum target_object object,
2264 const char *annex, const gdb_byte *buf,
2265 ULONGEST offset, LONGEST len)
2266 {
2267 return target_write_with_progress (ops, object, annex, buf, offset, len,
2268 NULL, NULL);
2269 }
2270
2271 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2272 the size of the transferred data. PADDING additional bytes are
2273 available in *BUF_P. This is a helper function for
2274 target_read_alloc; see the declaration of that function for more
2275 information. */
2276
2277 static LONGEST
2278 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2279 const char *annex, gdb_byte **buf_p, int padding)
2280 {
2281 size_t buf_alloc, buf_pos;
2282 gdb_byte *buf;
2283 LONGEST n;
2284
2285 /* This function does not have a length parameter; it reads the
2286 entire OBJECT. Also, it doesn't support objects fetched partly
2287 from one target and partly from another (in a different stratum,
2288 e.g. a core file and an executable). Both reasons make it
2289 unsuitable for reading memory. */
2290 gdb_assert (object != TARGET_OBJECT_MEMORY);
2291
2292 /* Start by reading up to 4K at a time. The target will throttle
2293 this number down if necessary. */
2294 buf_alloc = 4096;
2295 buf = xmalloc (buf_alloc);
2296 buf_pos = 0;
2297 while (1)
2298 {
2299 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2300 buf_pos, buf_alloc - buf_pos - padding);
2301 if (n < 0)
2302 {
2303 /* An error occurred. */
2304 xfree (buf);
2305 return -1;
2306 }
2307 else if (n == 0)
2308 {
2309 /* Read all there was. */
2310 if (buf_pos == 0)
2311 xfree (buf);
2312 else
2313 *buf_p = buf;
2314 return buf_pos;
2315 }
2316
2317 buf_pos += n;
2318
2319 /* If the buffer is filling up, expand it. */
2320 if (buf_alloc < buf_pos * 2)
2321 {
2322 buf_alloc *= 2;
2323 buf = xrealloc (buf, buf_alloc);
2324 }
2325
2326 QUIT;
2327 }
2328 }
2329
2330 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2331 the size of the transferred data. See the declaration in "target.h"
2332 for more information about the return value. */
2333
2334 LONGEST
2335 target_read_alloc (struct target_ops *ops, enum target_object object,
2336 const char *annex, gdb_byte **buf_p)
2337 {
2338 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2339 }
2340
2341 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2342 returned as a string, allocated using xmalloc. If an error occurs
2343 or the transfer is unsupported, NULL is returned. Empty objects
2344 are returned as allocated but empty strings. A warning is issued
2345 if the result contains any embedded NUL bytes. */
2346
2347 char *
2348 target_read_stralloc (struct target_ops *ops, enum target_object object,
2349 const char *annex)
2350 {
2351 gdb_byte *buffer;
2352 LONGEST i, transferred;
2353
2354 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2355
2356 if (transferred < 0)
2357 return NULL;
2358
2359 if (transferred == 0)
2360 return xstrdup ("");
2361
2362 buffer[transferred] = 0;
2363
2364 /* Check for embedded NUL bytes; but allow trailing NULs. */
2365 for (i = strlen (buffer); i < transferred; i++)
2366 if (buffer[i] != 0)
2367 {
2368 warning (_("target object %d, annex %s, "
2369 "contained unexpected null characters"),
2370 (int) object, annex ? annex : "(none)");
2371 break;
2372 }
2373
2374 return (char *) buffer;
2375 }
2376
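/* Illustrative usage sketch (not part of GDB, compiled out): fetching a
   string object with target_read_stralloc.  The OSDATA annex "processes"
   is just one plausible value; whether it is supported depends on the
   target stack.  */
#if 0
static void
example_show_osdata (void)
{
  char *text = target_read_stralloc (current_target.beneath,
                                     TARGET_OBJECT_OSDATA, "processes");

  if (text != NULL)
    {
      printf_unfiltered ("%s", text);
      xfree (text);
    }
}
#endif
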
2377 /* Memory transfer methods. */
2378
2379 void
2380 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2381 LONGEST len)
2382 {
2383 /* This method is used to read from an alternate, non-current
2384 target. This read must bypass the overlay support (as symbols
2385 don't match this target), and GDB's internal cache (wrong cache
2386 for this target). */
2387 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2388 != len)
2389 memory_error (EIO, addr);
2390 }
2391
2392 ULONGEST
2393 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2394 int len, enum bfd_endian byte_order)
2395 {
2396 gdb_byte buf[sizeof (ULONGEST)];
2397
2398 gdb_assert (len <= sizeof (buf));
2399 get_target_memory (ops, addr, buf, len);
2400 return extract_unsigned_integer (buf, len, byte_order);
2401 }
2402
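/* Illustrative usage sketch (not part of GDB, compiled out): pulling a
   4-byte unsigned value out of an alternate target with
   get_target_memory_unsigned, bypassing caches and overlay handling as
   described above.  */
#if 0
static ULONGEST
example_read_word (struct target_ops *ops, CORE_ADDR addr)
{
  return get_target_memory_unsigned (ops, addr, 4,
                                     gdbarch_byte_order (target_gdbarch));
}
#endif
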
2403 int
2404 target_insert_breakpoint (struct gdbarch *gdbarch,
2405 struct bp_target_info *bp_tgt)
2406 {
2407 if (!may_insert_breakpoints)
2408 {
2409 warning (_("May not insert breakpoints"));
2410 return 1;
2411 }
2412
2413 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2414 }
2415
2416 int
2417 target_remove_breakpoint (struct gdbarch *gdbarch,
2418 struct bp_target_info *bp_tgt)
2419 {
2420 /* This is kind of a weird case to handle, but the permission might
2421 have been changed after breakpoints were inserted - in which case
2422 we should just take the user literally and assume that any
2423 breakpoints should be left in place. */
2424 if (!may_insert_breakpoints)
2425 {
2426 warning (_("May not remove breakpoints"));
2427 return 1;
2428 }
2429
2430 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2431 }
2432
2433 static void
2434 target_info (char *args, int from_tty)
2435 {
2436 struct target_ops *t;
2437 int has_all_mem = 0;
2438
2439 if (symfile_objfile != NULL)
2440 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2441
2442 for (t = target_stack; t != NULL; t = t->beneath)
2443 {
2444 if (!(*t->to_has_memory) (t))
2445 continue;
2446
2447 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2448 continue;
2449 if (has_all_mem)
2450 printf_unfiltered (_("\tWhile running this, "
2451 "GDB does not access memory from...\n"));
2452 printf_unfiltered ("%s:\n", t->to_longname);
2453 (t->to_files_info) (t);
2454 has_all_mem = (*t->to_has_all_memory) (t);
2455 }
2456 }
2457
2458 /* This function is called before any new inferior is created, e.g.
2459 by running a program, attaching, or connecting to a target.
2460 It cleans up any state from previous invocations which might
2461 change between runs. This is a subset of what target_preopen
2462 resets (things which might change between targets). */
2463
2464 void
2465 target_pre_inferior (int from_tty)
2466 {
2467 /* Clear out solib state. Otherwise the solib state of the previous
2468 inferior might have survived and is entirely wrong for the new
2469 target. This has been observed on GNU/Linux using glibc 2.3. How
2470 to reproduce:
2471
2472 bash$ ./foo&
2473 [1] 4711
2474 bash$ ./foo&
2475 [1] 4712
2476 bash$ gdb ./foo
2477 [...]
2478 (gdb) attach 4711
2479 (gdb) detach
2480 (gdb) attach 4712
2481 Cannot access memory at address 0xdeadbeef
2482 */
2483
2484 /* In some OSs, the shared library list is the same/global/shared
2485 across inferiors. If code is shared between processes, so are
2486 memory regions and features. */
2487 if (!gdbarch_has_global_solist (target_gdbarch))
2488 {
2489 no_shared_libraries (NULL, from_tty);
2490
2491 invalidate_target_mem_regions ();
2492
2493 target_clear_description ();
2494 }
2495 }
2496
2497 /* Callback for iterate_over_inferiors. Gets rid of the given
2498 inferior. */
2499
2500 static int
2501 dispose_inferior (struct inferior *inf, void *args)
2502 {
2503 struct thread_info *thread;
2504
2505 thread = any_thread_of_process (inf->pid);
2506 if (thread)
2507 {
2508 switch_to_thread (thread->ptid);
2509
2510 /* Core inferiors actually should be detached, not killed. */
2511 if (target_has_execution)
2512 target_kill ();
2513 else
2514 target_detach (NULL, 0);
2515 }
2516
2517 return 0;
2518 }
2519
2520 /* This is to be called by the open routine before it does
2521 anything. */
2522
2523 void
2524 target_preopen (int from_tty)
2525 {
2526 dont_repeat ();
2527
2528 if (have_inferiors ())
2529 {
2530 if (!from_tty
2531 || !have_live_inferiors ()
2532 || query (_("A program is being debugged already. Kill it? ")))
2533 iterate_over_inferiors (dispose_inferior, NULL);
2534 else
2535 error (_("Program not killed."));
2536 }
2537
2538 /* Calling target_kill may remove the target from the stack. But if
2539 it doesn't (which seems like a win for UDI), remove it now. */
2540 /* Leave the exec target, though. The user may be switching from a
2541 live process to a core of the same program. */
2542 pop_all_targets_above (file_stratum, 0);
2543
2544 target_pre_inferior (from_tty);
2545 }
2546
2547 /* Detach a target after doing deferred register stores. */
2548
2549 void
2550 target_detach (char *args, int from_tty)
2551 {
2552 struct target_ops* t;
2553
2554 if (gdbarch_has_global_breakpoints (target_gdbarch))
2555 /* Don't remove global breakpoints here. They're removed on
2556 disconnection from the target. */
2557 ;
2558 else
2559 /* If we're in breakpoints-always-inserted mode, have to remove
2560 them before detaching. */
2561 remove_breakpoints_pid (PIDGET (inferior_ptid));
2562
2563 prepare_for_detach ();
2564
2565 for (t = current_target.beneath; t != NULL; t = t->beneath)
2566 {
2567 if (t->to_detach != NULL)
2568 {
2569 t->to_detach (t, args, from_tty);
2570 if (targetdebug)
2571 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2572 args, from_tty);
2573 return;
2574 }
2575 }
2576
2577 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2578 }
2579
2580 void
2581 target_disconnect (char *args, int from_tty)
2582 {
2583 struct target_ops *t;
2584
2585 /* If we're in breakpoints-always-inserted mode or if breakpoints
2586 are global across processes, we have to remove them before
2587 disconnecting. */
2588 remove_breakpoints ();
2589
2590 for (t = current_target.beneath; t != NULL; t = t->beneath)
2591 if (t->to_disconnect != NULL)
2592 {
2593 if (targetdebug)
2594 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2595 args, from_tty);
2596 t->to_disconnect (t, args, from_tty);
2597 return;
2598 }
2599
2600 tcomplain ();
2601 }
2602
2603 ptid_t
2604 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2605 {
2606 struct target_ops *t;
2607
2608 for (t = current_target.beneath; t != NULL; t = t->beneath)
2609 {
2610 if (t->to_wait != NULL)
2611 {
2612 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2613
2614 if (targetdebug)
2615 {
2616 char *status_string;
2617
2618 status_string = target_waitstatus_to_string (status);
2619 fprintf_unfiltered (gdb_stdlog,
2620 "target_wait (%d, status) = %d, %s\n",
2621 PIDGET (ptid), PIDGET (retval),
2622 status_string);
2623 xfree (status_string);
2624 }
2625
2626 return retval;
2627 }
2628 }
2629
2630 noprocess ();
2631 }
2632
2633 char *
2634 target_pid_to_str (ptid_t ptid)
2635 {
2636 struct target_ops *t;
2637
2638 for (t = current_target.beneath; t != NULL; t = t->beneath)
2639 {
2640 if (t->to_pid_to_str != NULL)
2641 return (*t->to_pid_to_str) (t, ptid);
2642 }
2643
2644 return normal_pid_to_str (ptid);
2645 }
2646
2647 char *
2648 target_thread_name (struct thread_info *info)
2649 {
2650 struct target_ops *t;
2651
2652 for (t = current_target.beneath; t != NULL; t = t->beneath)
2653 {
2654 if (t->to_thread_name != NULL)
2655 return (*t->to_thread_name) (info);
2656 }
2657
2658 return NULL;
2659 }
2660
2661 void
2662 target_resume (ptid_t ptid, int step, enum target_signal signal)
2663 {
2664 struct target_ops *t;
2665
2666 target_dcache_invalidate ();
2667
2668 for (t = current_target.beneath; t != NULL; t = t->beneath)
2669 {
2670 if (t->to_resume != NULL)
2671 {
2672 t->to_resume (t, ptid, step, signal);
2673 if (targetdebug)
2674 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2675 PIDGET (ptid),
2676 step ? "step" : "continue",
2677 target_signal_to_name (signal));
2678
2679 registers_changed_ptid (ptid);
2680 set_executing (ptid, 1);
2681 set_running (ptid, 1);
2682 clear_inline_frame_state (ptid);
2683 return;
2684 }
2685 }
2686
2687 noprocess ();
2688 }
2689
2690 void
2691 target_pass_signals (int numsigs, unsigned char *pass_signals)
2692 {
2693 struct target_ops *t;
2694
2695 for (t = current_target.beneath; t != NULL; t = t->beneath)
2696 {
2697 if (t->to_pass_signals != NULL)
2698 {
2699 if (targetdebug)
2700 {
2701 int i;
2702
2703 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2704 numsigs);
2705
2706 for (i = 0; i < numsigs; i++)
2707 if (pass_signals[i])
2708 fprintf_unfiltered (gdb_stdlog, " %s",
2709 target_signal_to_name (i));
2710
2711 fprintf_unfiltered (gdb_stdlog, " })\n");
2712 }
2713
2714 (*t->to_pass_signals) (numsigs, pass_signals);
2715 return;
2716 }
2717 }
2718 }
2719
2720 /* Look through the list of possible targets for a target that can
2721 follow forks. */
2722
2723 int
2724 target_follow_fork (int follow_child)
2725 {
2726 struct target_ops *t;
2727
2728 for (t = current_target.beneath; t != NULL; t = t->beneath)
2729 {
2730 if (t->to_follow_fork != NULL)
2731 {
2732 int retval = t->to_follow_fork (t, follow_child);
2733
2734 if (targetdebug)
2735 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2736 follow_child, retval);
2737 return retval;
2738 }
2739 }
2740
2741 /* Some target returned a fork event, but did not know how to follow it. */
2742 internal_error (__FILE__, __LINE__,
2743 _("could not find a target to follow fork"));
2744 }
2745
2746 void
2747 target_mourn_inferior (void)
2748 {
2749 struct target_ops *t;
2750
2751 for (t = current_target.beneath; t != NULL; t = t->beneath)
2752 {
2753 if (t->to_mourn_inferior != NULL)
2754 {
2755 t->to_mourn_inferior (t);
2756 if (targetdebug)
2757 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2758
2759 /* We no longer need to keep handles on any of the object files.
2760 Make sure to release them to avoid unnecessarily locking any
2761 of them while we're not actually debugging. */
2762 bfd_cache_close_all ();
2763
2764 return;
2765 }
2766 }
2767
2768 internal_error (__FILE__, __LINE__,
2769 _("could not find a target to follow mourn inferior"));
2770 }
2771
2772 /* Look for a target which can describe architectural features, starting
2773 from TARGET. If we find one, return its description. */
2774
2775 const struct target_desc *
2776 target_read_description (struct target_ops *target)
2777 {
2778 struct target_ops *t;
2779
2780 for (t = target; t != NULL; t = t->beneath)
2781 if (t->to_read_description != NULL)
2782 {
2783 const struct target_desc *tdesc;
2784
2785 tdesc = t->to_read_description (t);
2786 if (tdesc)
2787 return tdesc;
2788 }
2789
2790 return NULL;
2791 }
2792
2793 /* The default implementation of to_search_memory.
2794 This implements a basic search of memory, reading target memory and
2795 performing the search here (as opposed to performing the search on the
2796 target side with, for example, gdbserver). */
2797
2798 int
2799 simple_search_memory (struct target_ops *ops,
2800 CORE_ADDR start_addr, ULONGEST search_space_len,
2801 const gdb_byte *pattern, ULONGEST pattern_len,
2802 CORE_ADDR *found_addrp)
2803 {
2804 /* NOTE: also defined in find.c testcase. */
2805 #define SEARCH_CHUNK_SIZE 16000
2806 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2807 /* Buffer to hold memory contents for searching. */
2808 gdb_byte *search_buf;
2809 unsigned search_buf_size;
2810 struct cleanup *old_cleanups;
2811
2812 search_buf_size = chunk_size + pattern_len - 1;
2813
2814 /* No point in trying to allocate a buffer larger than the search space. */
2815 if (search_space_len < search_buf_size)
2816 search_buf_size = search_space_len;
2817
2818 search_buf = malloc (search_buf_size);
2819 if (search_buf == NULL)
2820 error (_("Unable to allocate memory to perform the search."));
2821 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2822
2823 /* Prime the search buffer. */
2824
2825 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2826 search_buf, start_addr, search_buf_size) != search_buf_size)
2827 {
2828 warning (_("Unable to access target memory at %s, halting search."),
2829 hex_string (start_addr));
2830 do_cleanups (old_cleanups);
2831 return -1;
2832 }
2833
2834 /* Perform the search.
2835
2836 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2837 When we've scanned N bytes we copy the trailing bytes to the start and
2838 read in another N bytes. */
2839
2840 while (search_space_len >= pattern_len)
2841 {
2842 gdb_byte *found_ptr;
2843 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2844
2845 found_ptr = memmem (search_buf, nr_search_bytes,
2846 pattern, pattern_len);
2847
2848 if (found_ptr != NULL)
2849 {
2850 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2851
2852 *found_addrp = found_addr;
2853 do_cleanups (old_cleanups);
2854 return 1;
2855 }
2856
2857 /* Not found in this chunk, skip to next chunk. */
2858
2859 /* Don't let search_space_len wrap here, it's unsigned. */
2860 if (search_space_len >= chunk_size)
2861 search_space_len -= chunk_size;
2862 else
2863 search_space_len = 0;
2864
2865 if (search_space_len >= pattern_len)
2866 {
2867 unsigned keep_len = search_buf_size - chunk_size;
2868 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2869 int nr_to_read;
2870
2871 /* Copy the trailing part of the previous iteration to the front
2872 of the buffer for the next iteration. */
2873 gdb_assert (keep_len == pattern_len - 1);
2874 memcpy (search_buf, search_buf + chunk_size, keep_len);
2875
2876 nr_to_read = min (search_space_len - keep_len, chunk_size);
2877
2878 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2879 search_buf + keep_len, read_addr,
2880 nr_to_read) != nr_to_read)
2881 {
2882 warning (_("Unable to access target "
2883 "memory at %s, halting search."),
2884 hex_string (read_addr));
2885 do_cleanups (old_cleanups);
2886 return -1;
2887 }
2888
2889 start_addr += chunk_size;
2890 }
2891 }
2892
2893 /* Not found. */
2894
2895 do_cleanups (old_cleanups);
2896 return 0;
2897 }
2898
2899 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2900 sequence of bytes in PATTERN with length PATTERN_LEN.
2901
2902 The result is 1 if found, 0 if not found, and -1 if there was an error
2903 requiring halting of the search (e.g. memory read error).
2904 If the pattern is found the address is recorded in FOUND_ADDRP. */
2905
2906 int
2907 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2908 const gdb_byte *pattern, ULONGEST pattern_len,
2909 CORE_ADDR *found_addrp)
2910 {
2911 struct target_ops *t;
2912 int found;
2913
2914 /* We don't use INHERIT to set current_target.to_search_memory,
2915 so we have to scan the target stack and handle targetdebug
2916 ourselves. */
2917
2918 if (targetdebug)
2919 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2920 hex_string (start_addr));
2921
2922 for (t = current_target.beneath; t != NULL; t = t->beneath)
2923 if (t->to_search_memory != NULL)
2924 break;
2925
2926 if (t != NULL)
2927 {
2928 found = t->to_search_memory (t, start_addr, search_space_len,
2929 pattern, pattern_len, found_addrp);
2930 }
2931 else
2932 {
2933 /* If a special version of to_search_memory isn't available, use the
2934 simple version. */
2935 found = simple_search_memory (current_target.beneath,
2936 start_addr, search_space_len,
2937 pattern, pattern_len, found_addrp);
2938 }
2939
2940 if (targetdebug)
2941 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2942
2943 return found;
2944 }
2945
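/* Illustrative usage sketch (not part of GDB, compiled out): scanning a
   region for a fixed byte pattern with target_search_memory and acting
   on the 1/0/-1 result described above.  The pattern bytes are made
   up.  */
#if 0
static void
example_find_pattern (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found_addr;
  int found = target_search_memory (start, space_len,
                                    pattern, sizeof (pattern), &found_addr);

  if (found > 0)
    printf_unfiltered ("pattern found at %s\n", hex_string (found_addr));
  else if (found == 0)
    printf_unfiltered ("pattern not found\n");
  else
    warning (_("search could not complete"));
}
#endif
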
2946 /* Look through the currently pushed targets. If none of them will
2947 be able to restart the currently running process, issue an error
2948 message. */
2949
2950 void
2951 target_require_runnable (void)
2952 {
2953 struct target_ops *t;
2954
2955 for (t = target_stack; t != NULL; t = t->beneath)
2956 {
2957 /* If this target knows how to create a new program, then
2958 assume we will still be able to after killing the current
2959 one. Either killing and mourning will not pop T, or else
2960 find_default_run_target will find it again. */
2961 if (t->to_create_inferior != NULL)
2962 return;
2963
2964 /* Do not worry about thread_stratum targets that cannot
2965 create inferiors. Assume they will be pushed again if
2966 necessary, and continue to the process_stratum. */
2967 if (t->to_stratum == thread_stratum
2968 || t->to_stratum == arch_stratum)
2969 continue;
2970
2971 error (_("The \"%s\" target does not support \"run\". "
2972 "Try \"help target\" or \"continue\"."),
2973 t->to_shortname);
2974 }
2975
2976 /* This function is only called if the target is running. In that
2977 case there should have been a process_stratum target and it
2978 should either know how to create inferiors, or not... */
2979 internal_error (__FILE__, __LINE__, _("No targets found"));
2980 }
2981
2982 /* Look through the list of possible targets for a target that can
2983 execute a run or attach command without any other data. This is
2984 used to locate the default process stratum.
2985
2986 If DO_MESG is not NULL, the result is always valid (error() is
2987 called for errors); else, return NULL on error. */
2988
2989 static struct target_ops *
2990 find_default_run_target (char *do_mesg)
2991 {
2992 struct target_ops **t;
2993 struct target_ops *runable = NULL;
2994 int count;
2995
2996 count = 0;
2997
2998 for (t = target_structs; t < target_structs + target_struct_size;
2999 ++t)
3000 {
3001 if ((*t)->to_can_run && target_can_run (*t))
3002 {
3003 runable = *t;
3004 ++count;
3005 }
3006 }
3007
3008 if (count != 1)
3009 {
3010 if (do_mesg)
3011 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3012 else
3013 return NULL;
3014 }
3015
3016 return runable;
3017 }
3018
3019 void
3020 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3021 {
3022 struct target_ops *t;
3023
3024 t = find_default_run_target ("attach");
3025 (t->to_attach) (t, args, from_tty);
3026 return;
3027 }
3028
3029 void
3030 find_default_create_inferior (struct target_ops *ops,
3031 char *exec_file, char *allargs, char **env,
3032 int from_tty)
3033 {
3034 struct target_ops *t;
3035
3036 t = find_default_run_target ("run");
3037 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3038 return;
3039 }
3040
3041 static int
3042 find_default_can_async_p (void)
3043 {
3044 struct target_ops *t;
3045
3046 /* This may be called before the target is pushed on the stack;
3047 look for the default process stratum. If there's none, gdb isn't
3048 configured with a native debugger, and target remote isn't
3049 connected yet. */
3050 t = find_default_run_target (NULL);
3051 if (t && t->to_can_async_p)
3052 return (t->to_can_async_p) ();
3053 return 0;
3054 }
3055
3056 static int
3057 find_default_is_async_p (void)
3058 {
3059 struct target_ops *t;
3060
3061 /* This may be called before the target is pushed on the stack;
3062 look for the default process stratum. If there's none, gdb isn't
3063 configured with a native debugger, and target remote isn't
3064 connected yet. */
3065 t = find_default_run_target (NULL);
3066 if (t && t->to_is_async_p)
3067 return (t->to_is_async_p) ();
3068 return 0;
3069 }
3070
3071 static int
3072 find_default_supports_non_stop (void)
3073 {
3074 struct target_ops *t;
3075
3076 t = find_default_run_target (NULL);
3077 if (t && t->to_supports_non_stop)
3078 return (t->to_supports_non_stop) ();
3079 return 0;
3080 }
3081
3082 int
3083 target_supports_non_stop (void)
3084 {
3085 struct target_ops *t;
3086
3087 for (t = &current_target; t != NULL; t = t->beneath)
3088 if (t->to_supports_non_stop)
3089 return t->to_supports_non_stop ();
3090
3091 return 0;
3092 }
3093
3094 /* Implement the "info proc" command. */
3095
3096 void
3097 target_info_proc (char *args, enum info_proc_what what)
3098 {
3099 struct target_ops *t;
3100
3101 /* If we're already connected to something that can get us
3102 OS-related data, use it. Otherwise, try using the native
3103 target. */
3104 if (current_target.to_stratum >= process_stratum)
3105 t = current_target.beneath;
3106 else
3107 t = find_default_run_target (NULL);
3108
3109 for (; t != NULL; t = t->beneath)
3110 {
3111 if (t->to_info_proc != NULL)
3112 {
3113 t->to_info_proc (t, args, what);
3114
3115 if (targetdebug)
3116 fprintf_unfiltered (gdb_stdlog,
3117 "target_info_proc (\"%s\", %d)\n", args, what);
3118
3119 return;
3120 }
3121 }
3122
3123 error (_("Not supported on this target."));
3124 }
3125
3126 static int
3127 find_default_supports_disable_randomization (void)
3128 {
3129 struct target_ops *t;
3130
3131 t = find_default_run_target (NULL);
3132 if (t && t->to_supports_disable_randomization)
3133 return (t->to_supports_disable_randomization) ();
3134 return 0;
3135 }
3136
3137 int
3138 target_supports_disable_randomization (void)
3139 {
3140 struct target_ops *t;
3141
3142 for (t = &current_target; t != NULL; t = t->beneath)
3143 if (t->to_supports_disable_randomization)
3144 return t->to_supports_disable_randomization ();
3145
3146 return 0;
3147 }
3148
3149 char *
3150 target_get_osdata (const char *type)
3151 {
3152 struct target_ops *t;
3153
3154 /* If we're already connected to something that can get us
3155 OS-related data, use it. Otherwise, try using the native
3156 target. */
3157 if (current_target.to_stratum >= process_stratum)
3158 t = current_target.beneath;
3159 else
3160 t = find_default_run_target ("get OS data");
3161
3162 if (!t)
3163 return NULL;
3164
3165 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3166 }
3167
3168 /* Determine the current address space of thread PTID. */
3169
3170 struct address_space *
3171 target_thread_address_space (ptid_t ptid)
3172 {
3173 struct address_space *aspace;
3174 struct inferior *inf;
3175 struct target_ops *t;
3176
3177 for (t = current_target.beneath; t != NULL; t = t->beneath)
3178 {
3179 if (t->to_thread_address_space != NULL)
3180 {
3181 aspace = t->to_thread_address_space (t, ptid);
3182 gdb_assert (aspace);
3183
3184 if (targetdebug)
3185 fprintf_unfiltered (gdb_stdlog,
3186 "target_thread_address_space (%s) = %d\n",
3187 target_pid_to_str (ptid),
3188 address_space_num (aspace));
3189 return aspace;
3190 }
3191 }
3192
3193 /* Fall-back to the "main" address space of the inferior. */
3194 inf = find_inferior_pid (ptid_get_pid (ptid));
3195
3196 if (inf == NULL || inf->aspace == NULL)
3197 internal_error (__FILE__, __LINE__,
3198 _("Can't determine the current "
3199 "address space of thread %s\n"),
3200 target_pid_to_str (ptid));
3201
3202 return inf->aspace;
3203 }
3204
3205
3206 /* Target file operations. */
3207
3208 static struct target_ops *
3209 default_fileio_target (void)
3210 {
3211 /* If we're already connected to something that can perform
3212 file I/O, use it. Otherwise, try using the native target. */
3213 if (current_target.to_stratum >= process_stratum)
3214 return current_target.beneath;
3215 else
3216 return find_default_run_target ("file I/O");
3217 }
3218
3219 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3220 target file descriptor, or -1 if an error occurs (and set
3221 *TARGET_ERRNO). */
3222 int
3223 target_fileio_open (const char *filename, int flags, int mode,
3224 int *target_errno)
3225 {
3226 struct target_ops *t;
3227
3228 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3229 {
3230 if (t->to_fileio_open != NULL)
3231 {
3232 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3233
3234 if (targetdebug)
3235 fprintf_unfiltered (gdb_stdlog,
3236 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3237 filename, flags, mode,
3238 fd, fd != -1 ? 0 : *target_errno);
3239 return fd;
3240 }
3241 }
3242
3243 *target_errno = FILEIO_ENOSYS;
3244 return -1;
3245 }
3246
3247 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3248 Return the number of bytes written, or -1 if an error occurs
3249 (and set *TARGET_ERRNO). */
3250 int
3251 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3252 ULONGEST offset, int *target_errno)
3253 {
3254 struct target_ops *t;
3255
3256 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3257 {
3258 if (t->to_fileio_pwrite != NULL)
3259 {
3260 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3261 target_errno);
3262
3263 if (targetdebug)
3264 fprintf_unfiltered (gdb_stdlog,
3265 "target_fileio_pwrite (%d,...,%d,%s) "
3266 "= %d (%d)\n",
3267 fd, len, pulongest (offset),
3268 ret, ret != -1 ? 0 : *target_errno);
3269 return ret;
3270 }
3271 }
3272
3273 *target_errno = FILEIO_ENOSYS;
3274 return -1;
3275 }
3276
3277 /* Read up to LEN bytes from FD on the target into READ_BUF.
3278 Return the number of bytes read, or -1 if an error occurs
3279 (and set *TARGET_ERRNO). */
3280 int
3281 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3282 ULONGEST offset, int *target_errno)
3283 {
3284 struct target_ops *t;
3285
3286 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3287 {
3288 if (t->to_fileio_pread != NULL)
3289 {
3290 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3291 target_errno);
3292
3293 if (targetdebug)
3294 fprintf_unfiltered (gdb_stdlog,
3295 "target_fileio_pread (%d,...,%d,%s) "
3296 "= %d (%d)\n",
3297 fd, len, pulongest (offset),
3298 ret, ret != -1 ? 0 : *target_errno);
3299 return ret;
3300 }
3301 }
3302
3303 *target_errno = FILEIO_ENOSYS;
3304 return -1;
3305 }
3306
3307 /* Close FD on the target. Return 0, or -1 if an error occurs
3308 (and set *TARGET_ERRNO). */
3309 int
3310 target_fileio_close (int fd, int *target_errno)
3311 {
3312 struct target_ops *t;
3313
3314 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3315 {
3316 if (t->to_fileio_close != NULL)
3317 {
3318 int ret = t->to_fileio_close (fd, target_errno);
3319
3320 if (targetdebug)
3321 fprintf_unfiltered (gdb_stdlog,
3322 "target_fileio_close (%d) = %d (%d)\n",
3323 fd, ret, ret != -1 ? 0 : *target_errno);
3324 return ret;
3325 }
3326 }
3327
3328 *target_errno = FILEIO_ENOSYS;
3329 return -1;
3330 }
3331
3332 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3333 occurs (and set *TARGET_ERRNO). */
3334 int
3335 target_fileio_unlink (const char *filename, int *target_errno)
3336 {
3337 struct target_ops *t;
3338
3339 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3340 {
3341 if (t->to_fileio_unlink != NULL)
3342 {
3343 int ret = t->to_fileio_unlink (filename, target_errno);
3344
3345 if (targetdebug)
3346 fprintf_unfiltered (gdb_stdlog,
3347 "target_fileio_unlink (%s) = %d (%d)\n",
3348 filename, ret, ret != -1 ? 0 : *target_errno);
3349 return ret;
3350 }
3351 }
3352
3353 *target_errno = FILEIO_ENOSYS;
3354 return -1;
3355 }
3356
3357 /* Read the value of the symbolic link FILENAME on the target. Return a
3358 null-terminated string allocated via xmalloc, or NULL if an error
3359 occurs (and set *TARGET_ERRNO). */
3360 char *
3361 target_fileio_readlink (const char *filename, int *target_errno)
3362 {
3363 struct target_ops *t;
3364
3365 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3366 {
3367 if (t->to_fileio_readlink != NULL)
3368 {
3369 char *ret = t->to_fileio_readlink (filename, target_errno);
3370
3371 if (targetdebug)
3372 fprintf_unfiltered (gdb_stdlog,
3373 "target_fileio_readlink (%s) = %s (%d)\n",
3374 filename, ret? ret : "(nil)",
3375 ret? 0 : *target_errno);
3376 return ret;
3377 }
3378 }
3379
3380 *target_errno = FILEIO_ENOSYS;
3381 return NULL;
3382 }
3383
3384 static void
3385 target_fileio_close_cleanup (void *opaque)
3386 {
3387 int fd = *(int *) opaque;
3388 int target_errno;
3389
3390 target_fileio_close (fd, &target_errno);
3391 }
3392
3393 /* Read target file FILENAME. Store the result in *BUF_P and
3394 return the size of the transferred data. PADDING additional bytes are
3395 available in *BUF_P. This is a helper function for
3396 target_fileio_read_alloc; see the declaration of that function for more
3397 information. */
3398
3399 static LONGEST
3400 target_fileio_read_alloc_1 (const char *filename,
3401 gdb_byte **buf_p, int padding)
3402 {
3403 struct cleanup *close_cleanup;
3404 size_t buf_alloc, buf_pos;
3405 gdb_byte *buf;
3406 LONGEST n;
3407 int fd;
3408 int target_errno;
3409
3410 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3411 if (fd == -1)
3412 return -1;
3413
3414 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3415
3416 /* Start by reading up to 4K at a time. The target will throttle
3417 this number down if necessary. */
3418 buf_alloc = 4096;
3419 buf = xmalloc (buf_alloc);
3420 buf_pos = 0;
3421 while (1)
3422 {
3423 n = target_fileio_pread (fd, &buf[buf_pos],
3424 buf_alloc - buf_pos - padding, buf_pos,
3425 &target_errno);
3426 if (n < 0)
3427 {
3428 /* An error occurred. */
3429 do_cleanups (close_cleanup);
3430 xfree (buf);
3431 return -1;
3432 }
3433 else if (n == 0)
3434 {
3435 /* Read all there was. */
3436 do_cleanups (close_cleanup);
3437 if (buf_pos == 0)
3438 xfree (buf);
3439 else
3440 *buf_p = buf;
3441 return buf_pos;
3442 }
3443
3444 buf_pos += n;
3445
3446 /* If the buffer is filling up, expand it. */
3447 if (buf_alloc < buf_pos * 2)
3448 {
3449 buf_alloc *= 2;
3450 buf = xrealloc (buf, buf_alloc);
3451 }
3452
3453 QUIT;
3454 }
3455 }
3456
3457 /* Read target file FILENAME. Store the result in *BUF_P and return
3458 the size of the transferred data. See the declaration in "target.h"
3459 for more information about the return value. */
3460
3461 LONGEST
3462 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3463 {
3464 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3465 }
3466
3467 /* Read target file FILENAME. The result is NUL-terminated and
3468 returned as a string, allocated using xmalloc. If an error occurs
3469 or the transfer is unsupported, NULL is returned. Empty objects
3470 are returned as allocated but empty strings. A warning is issued
3471 if the result contains any embedded NUL bytes. */
3472
3473 char *
3474 target_fileio_read_stralloc (const char *filename)
3475 {
3476 gdb_byte *buffer;
3477 LONGEST i, transferred;
3478
3479 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3480
3481 if (transferred < 0)
3482 return NULL;
3483
3484 if (transferred == 0)
3485 return xstrdup ("");
3486
3487 buffer[transferred] = 0;
3488
3489 /* Check for embedded NUL bytes; but allow trailing NULs. */
3490 for (i = strlen (buffer); i < transferred; i++)
3491 if (buffer[i] != 0)
3492 {
3493 warning (_("target file %s "
3494 "contained unexpected null characters"),
3495 filename);
3496 break;
3497 }
3498
3499 return (char *) buffer;
3500 }
3501
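/* Illustrative usage sketch (not part of GDB, compiled out): the
   open/pread/close sequence underlying target_fileio_read_alloc, with
   explicit TARGET_ERRNO handling.  The file name is arbitrary.  */
#if 0
static void
example_fileio_peek (void)
{
  gdb_byte buf[128];
  int target_errno;
  int fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY, 0,
                               &target_errno);

  if (fd == -1)
    return;

  if (target_fileio_pread (fd, buf, sizeof (buf), 0, &target_errno) > 0)
    printf_unfiltered ("read some bytes from the target\n");

  target_fileio_close (fd, &target_errno);
}
#endif
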
3502
3503 static int
3504 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3505 {
3506 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
3507 }
3508
3509 static int
3510 default_watchpoint_addr_within_range (struct target_ops *target,
3511 CORE_ADDR addr,
3512 CORE_ADDR start, int length)
3513 {
3514 return addr >= start && addr < start + length;
3515 }
3516
3517 static struct gdbarch *
3518 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3519 {
3520 return target_gdbarch;
3521 }
3522
3523 static int
3524 return_zero (void)
3525 {
3526 return 0;
3527 }
3528
3529 static int
3530 return_one (void)
3531 {
3532 return 1;
3533 }
3534
3535 static int
3536 return_minus_one (void)
3537 {
3538 return -1;
3539 }
3540
3541 /* Find a single runnable target in the stack and return it. If for
3542 some reason there is more than one, return NULL. */
3543
3544 struct target_ops *
3545 find_run_target (void)
3546 {
3547 struct target_ops **t;
3548 struct target_ops *runable = NULL;
3549 int count;
3550
3551 count = 0;
3552
3553 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3554 {
3555 if ((*t)->to_can_run && target_can_run (*t))
3556 {
3557 runable = *t;
3558 ++count;
3559 }
3560 }
3561
3562 return (count == 1 ? runable : NULL);
3563 }
3564
3565 /* Find the next target down the stack from the specified target. */
3568
3569 struct target_ops *
3570 find_target_beneath (struct target_ops *t)
3571 {
3572 return t->beneath;
3573 }
3574
3575 \f
3576 /* The inferior process has died. Long live the inferior! */
3577
3578 void
3579 generic_mourn_inferior (void)
3580 {
3581 ptid_t ptid;
3582
3583 ptid = inferior_ptid;
3584 inferior_ptid = null_ptid;
3585
3586 /* Mark breakpoints uninserted in case something tries to delete a
3587 breakpoint while we delete the inferior's threads (which would
3588 fail, since the inferior is long gone). */
3589 mark_breakpoints_out ();
3590
3591 if (!ptid_equal (ptid, null_ptid))
3592 {
3593 int pid = ptid_get_pid (ptid);
3594 exit_inferior (pid);
3595 }
3596
3597 /* Note this wipes step-resume breakpoints, so needs to be done
3598 after exit_inferior, which ends up referencing the step-resume
3599 breakpoints through clear_thread_inferior_resources. */
3600 breakpoint_init_inferior (inf_exited);
3601
3602 registers_changed ();
3603
3604 reopen_exec_file ();
3605 reinit_frame_cache ();
3606
3607 if (deprecated_detach_hook)
3608 deprecated_detach_hook ();
3609 }
3610 \f
3611 /* Helper function for child_wait and the derivatives of child_wait.
3612 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3613 translation of that in OURSTATUS. */
3614 void
3615 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3616 {
3617 if (WIFEXITED (hoststatus))
3618 {
3619 ourstatus->kind = TARGET_WAITKIND_EXITED;
3620 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3621 }
3622 else if (!WIFSTOPPED (hoststatus))
3623 {
3624 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3625 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3626 }
3627 else
3628 {
3629 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3630 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3631 }
3632 }
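
/* Illustrative usage sketch (not part of GDB, compiled out): translating
   a raw wait () status into a target_waitstatus, much as a native
   target's to_wait method would, and printing the result.  */
#if 0
static void
example_translate_status (int hoststatus)
{
  struct target_waitstatus ourstatus;
  char *s;

  store_waitstatus (&ourstatus, hoststatus);
  s = target_waitstatus_to_string (&ourstatus);
  fprintf_unfiltered (gdb_stdlog, "%s\n", s);
  xfree (s);
}
#endif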
3633 \f
3634 /* Convert a normal process ID to a string. Returns the string in a
3635 static buffer. */
3636
3637 char *
3638 normal_pid_to_str (ptid_t ptid)
3639 {
3640 static char buf[32];
3641
3642 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3643 return buf;
3644 }
3645
3646 static char *
3647 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3648 {
3649 return normal_pid_to_str (ptid);
3650 }
3651
3652 /* Error-catcher for target_find_memory_regions. */
3653 static int
3654 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3655 {
3656 error (_("Command not implemented for this target."));
3657 return 0;
3658 }
3659
3660 /* Error-catcher for target_make_corefile_notes. */
3661 static char *
3662 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3663 {
3664 error (_("Command not implemented for this target."));
3665 return NULL;
3666 }
3667
3668 /* Error-catcher for target_get_bookmark. */
3669 static gdb_byte *
3670 dummy_get_bookmark (char *ignore1, int ignore2)
3671 {
3672 tcomplain ();
3673 return NULL;
3674 }
3675
3676 /* Error-catcher for target_goto_bookmark. */
3677 static void
3678 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3679 {
3680 tcomplain ();
3681 }
3682
3683 /* Set up the handful of non-empty slots needed by the dummy target
3684 vector. */
3685
3686 static void
3687 init_dummy_target (void)
3688 {
3689 dummy_target.to_shortname = "None";
3690 dummy_target.to_longname = "None";
3691 dummy_target.to_doc = "";
3692 dummy_target.to_attach = find_default_attach;
3693 dummy_target.to_detach =
3694 (void (*)(struct target_ops *, char *, int))target_ignore;
3695 dummy_target.to_create_inferior = find_default_create_inferior;
3696 dummy_target.to_can_async_p = find_default_can_async_p;
3697 dummy_target.to_is_async_p = find_default_is_async_p;
3698 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3699 dummy_target.to_supports_disable_randomization
3700 = find_default_supports_disable_randomization;
3701 dummy_target.to_pid_to_str = dummy_pid_to_str;
3702 dummy_target.to_stratum = dummy_stratum;
3703 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3704 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3705 dummy_target.to_get_bookmark = dummy_get_bookmark;
3706 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3707 dummy_target.to_xfer_partial = default_xfer_partial;
3708 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3709 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3710 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3711 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3712 dummy_target.to_has_execution
3713 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3714 dummy_target.to_stopped_by_watchpoint = return_zero;
3715 dummy_target.to_stopped_data_address =
3716 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3717 dummy_target.to_magic = OPS_MAGIC;
3718 }
3719 \f
3720 static void
3721 debug_to_open (char *args, int from_tty)
3722 {
3723 debug_target.to_open (args, from_tty);
3724
3725 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3726 }
3727
3728 void
3729 target_close (struct target_ops *targ, int quitting)
3730 {
3731 if (targ->to_xclose != NULL)
3732 targ->to_xclose (targ, quitting);
3733 else if (targ->to_close != NULL)
3734 targ->to_close (quitting);
3735
3736 if (targetdebug)
3737 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3738 }
3739
3740 void
3741 target_attach (char *args, int from_tty)
3742 {
3743 struct target_ops *t;
3744
3745 for (t = current_target.beneath; t != NULL; t = t->beneath)
3746 {
3747 if (t->to_attach != NULL)
3748 {
3749 t->to_attach (t, args, from_tty);
3750 if (targetdebug)
3751 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3752 args, from_tty);
3753 return;
3754 }
3755 }
3756
3757 internal_error (__FILE__, __LINE__,
3758 _("could not find a target to attach"));
3759 }
3760
3761 int
3762 target_thread_alive (ptid_t ptid)
3763 {
3764 struct target_ops *t;
3765
3766 for (t = current_target.beneath; t != NULL; t = t->beneath)
3767 {
3768 if (t->to_thread_alive != NULL)
3769 {
3770 int retval;
3771
3772 retval = t->to_thread_alive (t, ptid);
3773 if (targetdebug)
3774 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3775 PIDGET (ptid), retval);
3776
3777 return retval;
3778 }
3779 }
3780
3781 return 0;
3782 }
3783
3784 void
3785 target_find_new_threads (void)
3786 {
3787 struct target_ops *t;
3788
3789 for (t = current_target.beneath; t != NULL; t = t->beneath)
3790 {
3791 if (t->to_find_new_threads != NULL)
3792 {
3793 t->to_find_new_threads (t);
3794 if (targetdebug)
3795 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3796
3797 return;
3798 }
3799 }
3800 }
3801
3802 void
3803 target_stop (ptid_t ptid)
3804 {
3805 if (!may_stop)
3806 {
3807 warning (_("May not interrupt or stop the target, ignoring attempt"));
3808 return;
3809 }
3810
3811 (*current_target.to_stop) (ptid);
3812 }
3813
3814 static void
3815 debug_to_post_attach (int pid)
3816 {
3817 debug_target.to_post_attach (pid);
3818
3819 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3820 }
3821
3822 /* Return a pretty-printed form of target_waitstatus.
3823 Space for the result is malloc'd; the caller must free it. */
3824
3825 char *
3826 target_waitstatus_to_string (const struct target_waitstatus *ws)
3827 {
3828 const char *kind_str = "status->kind = ";
3829
3830 switch (ws->kind)
3831 {
3832 case TARGET_WAITKIND_EXITED:
3833 return xstrprintf ("%sexited, status = %d",
3834 kind_str, ws->value.integer);
3835 case TARGET_WAITKIND_STOPPED:
3836 return xstrprintf ("%sstopped, signal = %s",
3837 kind_str, target_signal_to_name (ws->value.sig));
3838 case TARGET_WAITKIND_SIGNALLED:
3839 return xstrprintf ("%ssignalled, signal = %s",
3840 kind_str, target_signal_to_name (ws->value.sig));
3841 case TARGET_WAITKIND_LOADED:
3842 return xstrprintf ("%sloaded", kind_str);
3843 case TARGET_WAITKIND_FORKED:
3844 return xstrprintf ("%sforked", kind_str);
3845 case TARGET_WAITKIND_VFORKED:
3846 return xstrprintf ("%svforked", kind_str);
3847 case TARGET_WAITKIND_EXECD:
3848 return xstrprintf ("%sexecd", kind_str);
3849 case TARGET_WAITKIND_SYSCALL_ENTRY:
3850 return xstrprintf ("%sentered syscall", kind_str);
3851 case TARGET_WAITKIND_SYSCALL_RETURN:
3852 return xstrprintf ("%sexited syscall", kind_str);
3853 case TARGET_WAITKIND_SPURIOUS:
3854 return xstrprintf ("%sspurious", kind_str);
3855 case TARGET_WAITKIND_IGNORE:
3856 return xstrprintf ("%signore", kind_str);
3857 case TARGET_WAITKIND_NO_HISTORY:
3858 return xstrprintf ("%sno-history", kind_str);
3859 case TARGET_WAITKIND_NO_RESUMED:
3860 return xstrprintf ("%sno-resumed", kind_str);
3861 default:
3862 return xstrprintf ("%sunknown???", kind_str);
3863 }
3864 }
3865
3866 static void
3867 debug_print_register (const char * func,
3868 struct regcache *regcache, int regno)
3869 {
3870 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3871
3872 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3873 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3874 && gdbarch_register_name (gdbarch, regno) != NULL
3875 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3876 fprintf_unfiltered (gdb_stdlog, "(%s)",
3877 gdbarch_register_name (gdbarch, regno));
3878 else
3879 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3880 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3881 {
3882 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3883 int i, size = register_size (gdbarch, regno);
3884 unsigned char buf[MAX_REGISTER_SIZE];
3885
3886 regcache_raw_collect (regcache, regno, buf);
3887 fprintf_unfiltered (gdb_stdlog, " = ");
3888 for (i = 0; i < size; i++)
3889 {
3890 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3891 }
3892 if (size <= sizeof (LONGEST))
3893 {
3894 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3895
3896 fprintf_unfiltered (gdb_stdlog, " %s %s",
3897 core_addr_to_string_nz (val), plongest (val));
3898 }
3899 }
3900 fprintf_unfiltered (gdb_stdlog, "\n");
3901 }
3902
3903 void
3904 target_fetch_registers (struct regcache *regcache, int regno)
3905 {
3906 struct target_ops *t;
3907
3908 for (t = current_target.beneath; t != NULL; t = t->beneath)
3909 {
3910 if (t->to_fetch_registers != NULL)
3911 {
3912 t->to_fetch_registers (t, regcache, regno);
3913 if (targetdebug)
3914 debug_print_register ("target_fetch_registers", regcache, regno);
3915 return;
3916 }
3917 }
3918 }
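
/* Editorial usage sketch (not from the original source): REGNO may
   name a single register, or -1 for "all registers", following the
   regcache convention used elsewhere in GDB; e.g.:

     target_fetch_registers (get_current_regcache (), -1);
*/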
3919
3920 void
3921 target_store_registers (struct regcache *regcache, int regno)
3922 {
3923 struct target_ops *t;
3924
3925 if (!may_write_registers)
3926 error (_("Writing to registers is not allowed (regno %d)"), regno);
3927
3928 for (t = current_target.beneath; t != NULL; t = t->beneath)
3929 {
3930 if (t->to_store_registers != NULL)
3931 {
3932 t->to_store_registers (t, regcache, regno);
3933 if (targetdebug)
3934 {
3935 debug_print_register ("target_store_registers", regcache, regno);
3936 }
3937 return;
3938 }
3939 }
3940
3941 noprocess ();
3942 }
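
/* Editorial note (not from the original source): register writes are
   gated by the "may-write-registers" permission checked above, so

     (gdb) set may-write-registers off

   makes any target_store_registers call error out instead of touching
   the target.  */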
3943
3944 int
3945 target_core_of_thread (ptid_t ptid)
3946 {
3947 struct target_ops *t;
3948
3949 for (t = current_target.beneath; t != NULL; t = t->beneath)
3950 {
3951 if (t->to_core_of_thread != NULL)
3952 {
3953 int retval = t->to_core_of_thread (t, ptid);
3954
3955 if (targetdebug)
3956 fprintf_unfiltered (gdb_stdlog,
3957 "target_core_of_thread (%d) = %d\n",
3958 PIDGET (ptid), retval);
3959 return retval;
3960 }
3961 }
3962
3963 return -1;
3964 }
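
/* Editorial usage sketch (not from the original source): a return
   value of -1 means the core is unknown, so callers typically guard
   on it:

     int core = target_core_of_thread (inferior_ptid);

     if (core >= 0)
       printf_filtered (_("thread is on core %d\n"), core);
*/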
3965
3966 int
3967 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3968 {
3969 struct target_ops *t;
3970
3971 for (t = current_target.beneath; t != NULL; t = t->beneath)
3972 {
3973 if (t->to_verify_memory != NULL)
3974 {
3975 int retval = t->to_verify_memory (t, data, memaddr, size);
3976
3977 if (targetdebug)
3978 fprintf_unfiltered (gdb_stdlog,
3979 "target_verify_memory (%s, %s) = %d\n",
3980 paddress (target_gdbarch, memaddr),
3981 pulongest (size),
3982 retval);
3983 return retval;
3984 }
3985 }
3986
3987 tcomplain ();
3988 }
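
/* Editorial usage sketch (not from the original source, and hedged on
   the target.h contract): the result is a simple comparison flag, so
   a caller checking that SIZE bytes at MEMADDR still match a local
   buffer might look like:

     if (target_verify_memory (local_buf, memaddr, size))
       printf_filtered (_("target memory matches\n"));
*/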
3989
3990 /* The documentation for this function is in its prototype declaration in
3991 target.h. */
3992
3993 int
3994 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3995 {
3996 struct target_ops *t;
3997
3998 for (t = current_target.beneath; t != NULL; t = t->beneath)
3999 if (t->to_insert_mask_watchpoint != NULL)
4000 {
4001 int ret;
4002
4003 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4004
4005 if (targetdebug)
4006 fprintf_unfiltered (gdb_stdlog, "\
4007 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4008 core_addr_to_string (addr),
4009 core_addr_to_string (mask), rw, ret);
4010
4011 return ret;
4012 }
4013
4014 return 1;
4015 }
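
/* Editorial usage sketch (not from the original source): RW takes the
   usual hardware-watchpoint type values (hw_write, hw_read,
   hw_access), and the fall-through "return 1" above reports that no
   target layer supports masked watchpoints; e.g.:

     if (target_insert_mask_watchpoint (addr, mask, hw_write) != 0)
       warning (_("masked watchpoint not inserted"));
*/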
4016
4017 /* The documentation for this function is in its prototype declaration in
4018 target.h. */
4019
4020 int
4021 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4022 {
4023 struct target_ops *t;
4024
4025 for (t = current_target.beneath; t != NULL; t = t->beneath)
4026 if (t->to_remove_mask_watchpoint != NULL)
4027 {
4028 int ret;
4029
4030 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4031
4032 if (targetdebug)
4033 fprintf_unfiltered (gdb_stdlog, "\
4034 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4035 core_addr_to_string (addr),
4036 core_addr_to_string (mask), rw, ret);
4037
4038 return ret;
4039 }
4040
4041 return 1;
4042 }
4043
4044 /* The documentation for this function is in its prototype declaration
4045 in target.h. */
4046
4047 int
4048 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4049 {
4050 struct target_ops *t;
4051
4052 for (t = current_target.beneath; t != NULL; t = t->beneath)
4053 if (t->to_masked_watch_num_registers != NULL)
4054 return t->to_masked_watch_num_registers (t, addr, mask);
4055
4056 return -1;
4057 }
4058
4059 /* The documentation for this function is in its prototype declaration
4060 in target.h. */
4061
4062 int
4063 target_ranged_break_num_registers (void)
4064 {
4065 struct target_ops *t;
4066
4067 for (t = current_target.beneath; t != NULL; t = t->beneath)
4068 if (t->to_ranged_break_num_registers != NULL)
4069 return t->to_ranged_break_num_registers (t);
4070
4071 return -1;
4072 }
4073
4074 static void
4075 debug_to_prepare_to_store (struct regcache *regcache)
4076 {
4077 debug_target.to_prepare_to_store (regcache);
4078
4079 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4080 }
4081
4082 static int
4083 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4084 int write, struct mem_attrib *attrib,
4085 struct target_ops *target)
4086 {
4087 int retval;
4088
4089 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4090 attrib, target);
4091
4092 fprintf_unfiltered (gdb_stdlog,
4093 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4094 paddress (target_gdbarch, memaddr), len,
4095 write ? "write" : "read", retval);
4096
4097 if (retval > 0)
4098 {
4099 int i;
4100
4101 fputs_unfiltered (", bytes =", gdb_stdlog);
4102 for (i = 0; i < retval; i++)
4103 {
4104 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4105 {
4106 if (targetdebug < 2 && i > 0)
4107 {
4108 fprintf_unfiltered (gdb_stdlog, " ...");
4109 break;
4110 }
4111 fprintf_unfiltered (gdb_stdlog, "\n");
4112 }
4113
4114 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4115 }
4116 }
4117
4118 fputc_unfiltered ('\n', gdb_stdlog);
4119
4120 return retval;
4121 }
4122
4123 static void
4124 debug_to_files_info (struct target_ops *target)
4125 {
4126 debug_target.to_files_info (target);
4127
4128 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4129 }
4130
4131 static int
4132 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4133 struct bp_target_info *bp_tgt)
4134 {
4135 int retval;
4136
4137 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4138
4139 fprintf_unfiltered (gdb_stdlog,
4140 "target_insert_breakpoint (%s, xxx) = %ld\n",
4141 core_addr_to_string (bp_tgt->placed_address),
4142 (unsigned long) retval);
4143 return retval;
4144 }
4145
4146 static int
4147 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4148 struct bp_target_info *bp_tgt)
4149 {
4150 int retval;
4151
4152 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4153
4154 fprintf_unfiltered (gdb_stdlog,
4155 "target_remove_breakpoint (%s, xxx) = %ld\n",
4156 core_addr_to_string (bp_tgt->placed_address),
4157 (unsigned long) retval);
4158 return retval;
4159 }
4160
4161 static int
4162 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4163 {
4164 int retval;
4165
4166 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4167
4168 fprintf_unfiltered (gdb_stdlog,
4169 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4170 (unsigned long) type,
4171 (unsigned long) cnt,
4172 (unsigned long) from_tty,
4173 (unsigned long) retval);
4174 return retval;
4175 }
4176
4177 static int
4178 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4179 {
4180 int retval;
4181 
4182 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4183 
4184 fprintf_unfiltered (gdb_stdlog,
4185 "target_region_ok_for_hw_watchpoint (%s, %ld) = %d\n",
4186 core_addr_to_string (addr), (unsigned long) len,
4187 retval);
4188 return retval;
4189 }
4190
4191 static int
4192 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4193 struct expression *cond)
4194 {
4195 int retval;
4196
4197 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4198 rw, cond);
4199
4200 fprintf_unfiltered (gdb_stdlog,
4201 "target_can_accel_watchpoint_condition "
4202 "(%s, %d, %d, %s) = %ld\n",
4203 core_addr_to_string (addr), len, rw,
4204 host_address_to_string (cond), (unsigned long) retval);
4205 return retval;
4206 }
4207
4208 static int
4209 debug_to_stopped_by_watchpoint (void)
4210 {
4211 int retval;
4212
4213 retval = debug_target.to_stopped_by_watchpoint ();
4214
4215 fprintf_unfiltered (gdb_stdlog,
4216 "target_stopped_by_watchpoint () = %ld\n",
4217 (unsigned long) retval);
4218 return retval;
4219 }
4220
4221 static int
4222 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4223 {
4224 int retval;
4225
4226 retval = debug_target.to_stopped_data_address (target, addr);
4227
4228 fprintf_unfiltered (gdb_stdlog,
4229 "target_stopped_data_address ([%s]) = %ld\n",
4230 core_addr_to_string (*addr),
4231 (unsigned long) retval);
4232 return retval;
4233 }
4234
4235 static int
4236 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4237 CORE_ADDR addr,
4238 CORE_ADDR start, int length)
4239 {
4240 int retval;
4241
4242 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4243 start, length);
4244
4245 fprintf_unfiltered (gdb_stdlog,
4246 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4247 core_addr_to_string (addr), core_addr_to_string (start),
4248 length, retval);
4249 return retval;
4250 }
4251
4252 static int
4253 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4254 struct bp_target_info *bp_tgt)
4255 {
4256 int retval;
4257
4258 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4259
4260 fprintf_unfiltered (gdb_stdlog,
4261 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4262 core_addr_to_string (bp_tgt->placed_address),
4263 (unsigned long) retval);
4264 return retval;
4265 }
4266
4267 static int
4268 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4269 struct bp_target_info *bp_tgt)
4270 {
4271 int retval;
4272
4273 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4274
4275 fprintf_unfiltered (gdb_stdlog,
4276 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4277 core_addr_to_string (bp_tgt->placed_address),
4278 (unsigned long) retval);
4279 return retval;
4280 }
4281
4282 static int
4283 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4284 struct expression *cond)
4285 {
4286 int retval;
4287
4288 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4289
4290 fprintf_unfiltered (gdb_stdlog,
4291 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4292 core_addr_to_string (addr), len, type,
4293 host_address_to_string (cond), (unsigned long) retval);
4294 return retval;
4295 }
4296
4297 static int
4298 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4299 struct expression *cond)
4300 {
4301 int retval;
4302
4303 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4304
4305 fprintf_unfiltered (gdb_stdlog,
4306 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4307 core_addr_to_string (addr), len, type,
4308 host_address_to_string (cond), (unsigned long) retval);
4309 return retval;
4310 }
4311
4312 static void
4313 debug_to_terminal_init (void)
4314 {
4315 debug_target.to_terminal_init ();
4316
4317 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4318 }
4319
4320 static void
4321 debug_to_terminal_inferior (void)
4322 {
4323 debug_target.to_terminal_inferior ();
4324
4325 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4326 }
4327
4328 static void
4329 debug_to_terminal_ours_for_output (void)
4330 {
4331 debug_target.to_terminal_ours_for_output ();
4332
4333 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4334 }
4335
4336 static void
4337 debug_to_terminal_ours (void)
4338 {
4339 debug_target.to_terminal_ours ();
4340
4341 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4342 }
4343
4344 static void
4345 debug_to_terminal_save_ours (void)
4346 {
4347 debug_target.to_terminal_save_ours ();
4348
4349 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4350 }
4351
4352 static void
4353 debug_to_terminal_info (char *arg, int from_tty)
4354 {
4355 debug_target.to_terminal_info (arg, from_tty);
4356
4357 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4358 from_tty);
4359 }
4360
4361 static void
4362 debug_to_load (char *args, int from_tty)
4363 {
4364 debug_target.to_load (args, from_tty);
4365
4366 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4367 }
4368
4369 static void
4370 debug_to_post_startup_inferior (ptid_t ptid)
4371 {
4372 debug_target.to_post_startup_inferior (ptid);
4373
4374 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4375 PIDGET (ptid));
4376 }
4377
4378 static int
4379 debug_to_insert_fork_catchpoint (int pid)
4380 {
4381 int retval;
4382
4383 retval = debug_target.to_insert_fork_catchpoint (pid);
4384
4385 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4386 pid, retval);
4387
4388 return retval;
4389 }
4390
4391 static int
4392 debug_to_remove_fork_catchpoint (int pid)
4393 {
4394 int retval;
4395
4396 retval = debug_target.to_remove_fork_catchpoint (pid);
4397
4398 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4399 pid, retval);
4400
4401 return retval;
4402 }
4403
4404 static int
4405 debug_to_insert_vfork_catchpoint (int pid)
4406 {
4407 int retval;
4408
4409 retval = debug_target.to_insert_vfork_catchpoint (pid);
4410
4411 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4412 pid, retval);
4413
4414 return retval;
4415 }
4416
4417 static int
4418 debug_to_remove_vfork_catchpoint (int pid)
4419 {
4420 int retval;
4421
4422 retval = debug_target.to_remove_vfork_catchpoint (pid);
4423
4424 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4425 pid, retval);
4426
4427 return retval;
4428 }
4429
4430 static int
4431 debug_to_insert_exec_catchpoint (int pid)
4432 {
4433 int retval;
4434
4435 retval = debug_target.to_insert_exec_catchpoint (pid);
4436
4437 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4438 pid, retval);
4439
4440 return retval;
4441 }
4442
4443 static int
4444 debug_to_remove_exec_catchpoint (int pid)
4445 {
4446 int retval;
4447
4448 retval = debug_target.to_remove_exec_catchpoint (pid);
4449
4450 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4451 pid, retval);
4452
4453 return retval;
4454 }
4455
4456 static int
4457 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4458 {
4459 int has_exited;
4460
4461 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4462
4463 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4464 pid, wait_status, *exit_status, has_exited);
4465
4466 return has_exited;
4467 }
4468
4469 static int
4470 debug_to_can_run (void)
4471 {
4472 int retval;
4473
4474 retval = debug_target.to_can_run ();
4475
4476 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4477
4478 return retval;
4479 }
4480
4481 static struct gdbarch *
4482 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4483 {
4484 struct gdbarch *retval;
4485
4486 retval = debug_target.to_thread_architecture (ops, ptid);
4487
4488 fprintf_unfiltered (gdb_stdlog,
4489 "target_thread_architecture (%s) = %s [%s]\n",
4490 target_pid_to_str (ptid),
4491 host_address_to_string (retval),
4492 gdbarch_bfd_arch_info (retval)->printable_name);
4493 return retval;
4494 }
4495
4496 static void
4497 debug_to_stop (ptid_t ptid)
4498 {
4499 debug_target.to_stop (ptid);
4500
4501 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4502 target_pid_to_str (ptid));
4503 }
4504
4505 static void
4506 debug_to_rcmd (char *command,
4507 struct ui_file *outbuf)
4508 {
4509 debug_target.to_rcmd (command, outbuf);
4510 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4511 }
4512
4513 static char *
4514 debug_to_pid_to_exec_file (int pid)
4515 {
4516 char *exec_file;
4517
4518 exec_file = debug_target.to_pid_to_exec_file (pid);
4519
4520 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4521 pid, exec_file);
4522
4523 return exec_file;
4524 }
4525
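/* Editorial comment (added for clarity, not in the original source):
   save a copy of the current (merged) target vector in debug_target,
   then redirect selected methods of current_target to the debug_to_*
   wrappers above.  Each wrapper calls through the saved copy and logs
   the call to gdb_stdlog, which is what "set debug target 1" turns
   on.  */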
4526 static void
4527 setup_target_debug (void)
4528 {
4529 memcpy (&debug_target, &current_target, sizeof debug_target);
4530
4531 current_target.to_open = debug_to_open;
4532 current_target.to_post_attach = debug_to_post_attach;
4533 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4534 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4535 current_target.to_files_info = debug_to_files_info;
4536 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4537 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4538 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4539 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4540 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4541 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4542 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4543 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4544 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4545 current_target.to_watchpoint_addr_within_range
4546 = debug_to_watchpoint_addr_within_range;
4547 current_target.to_region_ok_for_hw_watchpoint
4548 = debug_to_region_ok_for_hw_watchpoint;
4549 current_target.to_can_accel_watchpoint_condition
4550 = debug_to_can_accel_watchpoint_condition;
4551 current_target.to_terminal_init = debug_to_terminal_init;
4552 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4553 current_target.to_terminal_ours_for_output
4554 = debug_to_terminal_ours_for_output;
4555 current_target.to_terminal_ours = debug_to_terminal_ours;
4556 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4557 current_target.to_terminal_info = debug_to_terminal_info;
4558 current_target.to_load = debug_to_load;
4559 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4560 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4561 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4562 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4563 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4564 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4565 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4566 current_target.to_has_exited = debug_to_has_exited;
4567 current_target.to_can_run = debug_to_can_run;
4568 current_target.to_stop = debug_to_stop;
4569 current_target.to_rcmd = debug_to_rcmd;
4570 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4571 current_target.to_thread_architecture = debug_to_thread_architecture;
4572 }
4573 \f
4574
4575 static char targ_desc[] =
4576 "Names of targets and files being debugged.\nShows the entire \
4577 stack of targets currently in use (including the exec-file,\n\
4578 core-file, and process, if any), as well as the symbol file name.";
4579
4580 static void
4581 do_monitor_command (char *cmd,
4582 int from_tty)
4583 {
4584 if ((current_target.to_rcmd
4585 == (void (*) (char *, struct ui_file *)) tcomplain)
4586 || (current_target.to_rcmd == debug_to_rcmd
4587 && (debug_target.to_rcmd
4588 == (void (*) (char *, struct ui_file *)) tcomplain)))
4589 error (_("\"monitor\" command not supported by this target."));
4590 target_rcmd (cmd, gdb_stdtarg);
4591 }
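
/* Editorial usage sketch (not from the original source): at the CLI
   this surfaces as, for example,

     (gdb) monitor help

   which forwards the string "help" to the current target's to_rcmd
   method (the remote stub, in the case of the remote target).  */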
4592
4593 /* Print the name of each layer of our target stack.  */
4594
4595 static void
4596 maintenance_print_target_stack (char *cmd, int from_tty)
4597 {
4598 struct target_ops *t;
4599
4600 printf_filtered (_("The current target stack is:\n"));
4601
4602 for (t = target_stack; t != NULL; t = t->beneath)
4603 {
4604 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4605 }
4606 }
4607
4608 /* Controls whether async mode is permitted.  */
4609 int target_async_permitted = 0;
4610
4611 /* The set command writes to this variable.  If the inferior is
4612 executing, target_async_permitted is *not* updated.  */
4613 static int target_async_permitted_1 = 0;
4614
4615 static void
4616 set_maintenance_target_async_permitted (char *args, int from_tty,
4617 struct cmd_list_element *c)
4618 {
4619 if (have_live_inferiors ())
4620 {
4621 target_async_permitted_1 = target_async_permitted;
4622 error (_("Cannot change this setting while the inferior is running."));
4623 }
4624
4625 target_async_permitted = target_async_permitted_1;
4626 }
4627
4628 static void
4629 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
4630 struct cmd_list_element *c,
4631 const char *value)
4632 {
4633 fprintf_filtered (file,
4634 _("Controlling the inferior in "
4635 "asynchronous mode is %s.\n"), value);
4636 }
4637
4638 /* Temporary copies of permission settings. */
4639
4640 static int may_write_registers_1 = 1;
4641 static int may_write_memory_1 = 1;
4642 static int may_insert_breakpoints_1 = 1;
4643 static int may_insert_tracepoints_1 = 1;
4644 static int may_insert_fast_tracepoints_1 = 1;
4645 static int may_stop_1 = 1;
4646
4647 /* Make the user-set values match the real values again. */
4648
4649 void
4650 update_target_permissions (void)
4651 {
4652 may_write_registers_1 = may_write_registers;
4653 may_write_memory_1 = may_write_memory;
4654 may_insert_breakpoints_1 = may_insert_breakpoints;
4655 may_insert_tracepoints_1 = may_insert_tracepoints;
4656 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4657 may_stop_1 = may_stop;
4658 }
4659
4660 /* This single function handles (most of) the permission flags in the
4661 same way.  */
4662
4663 static void
4664 set_target_permissions (char *args, int from_tty,
4665 struct cmd_list_element *c)
4666 {
4667 if (target_has_execution)
4668 {
4669 update_target_permissions ();
4670 error (_("Cannot change this setting while the inferior is running."));
4671 }
4672
4673 /* Make the real values match the user-changed values. */
4674 may_write_registers = may_write_registers_1;
4675 may_insert_breakpoints = may_insert_breakpoints_1;
4676 may_insert_tracepoints = may_insert_tracepoints_1;
4677 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4678 may_stop = may_stop_1;
4679 update_observer_mode ();
4680 }
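
/* Editorial note (not from the original source): these permissions
   correspond to the "may-*" settings registered in initialize_targets
   below, e.g.

     (gdb) set may-insert-breakpoints off

   which, once the inferior is stopped, makes later breakpoint
   insertion attempts fail with an error.  */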
4681
4682 /* Set memory write permission independently of observer mode. */
4683
4684 static void
4685 set_write_memory_permission (char *args, int from_tty,
4686 struct cmd_list_element *c)
4687 {
4688 /* Make the real values match the user-changed values. */
4689 may_write_memory = may_write_memory_1;
4690 update_observer_mode ();
4691 }
4692
4693
4694 void
4695 initialize_targets (void)
4696 {
4697 init_dummy_target ();
4698 push_target (&dummy_target);
4699
4700 add_info ("target", target_info, targ_desc);
4701 add_info ("files", target_info, targ_desc);
4702
4703 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4704 Set target debugging."), _("\
4705 Show target debugging."), _("\
4706 When non-zero, target debugging is enabled. Higher numbers are more\n\
4707 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4708 command."),
4709 NULL,
4710 show_targetdebug,
4711 &setdebuglist, &showdebuglist);
4712
4713 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4714 &trust_readonly, _("\
4715 Set mode for reading from readonly sections."), _("\
4716 Show mode for reading from readonly sections."), _("\
4717 When this mode is on, memory reads from readonly sections (such as .text)\n\
4718 will be read from the object file instead of from the target. This will\n\
4719 result in significant performance improvement for remote targets."),
4720 NULL,
4721 show_trust_readonly,
4722 &setlist, &showlist);
4723
4724 add_com ("monitor", class_obscure, do_monitor_command,
4725 _("Send a command to the remote monitor (remote targets only)."));
4726
4727 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4728 _("Print the name of each layer of the internal target stack."),
4729 &maintenanceprintlist);
4730
4731 add_setshow_boolean_cmd ("target-async", no_class,
4732 &target_async_permitted_1, _("\
4733 Set whether gdb controls the inferior in asynchronous mode."), _("\
4734 Show whether gdb controls the inferior in asynchronous mode."), _("\
4735 Tells gdb whether to control the inferior in asynchronous mode."),
4736 set_maintenance_target_async_permitted,
4737 show_maintenance_target_async_permitted,
4738 &setlist,
4739 &showlist);
4740
4741 add_setshow_boolean_cmd ("stack-cache", class_support,
4742 &stack_cache_enabled_p_1, _("\
4743 Set cache use for stack access."), _("\
4744 Show cache use for stack access."), _("\
4745 When on, use the data cache for all stack access, regardless of any\n\
4746 configured memory regions. This improves remote performance significantly.\n\
4747 By default, caching for stack access is on."),
4748 set_stack_cache_enabled_p,
4749 show_stack_cache_enabled_p,
4750 &setlist, &showlist);
4751
4752 add_setshow_boolean_cmd ("may-write-registers", class_support,
4753 &may_write_registers_1, _("\
4754 Set permission to write into registers."), _("\
4755 Show permission to write into registers."), _("\
4756 When this permission is on, GDB may write into the target's registers.\n\
4757 Otherwise, any sort of write attempt will result in an error."),
4758 set_target_permissions, NULL,
4759 &setlist, &showlist);
4760
4761 add_setshow_boolean_cmd ("may-write-memory", class_support,
4762 &may_write_memory_1, _("\
4763 Set permission to write into target memory."), _("\
4764 Show permission to write into target memory."), _("\
4765 When this permission is on, GDB may write into the target's memory.\n\
4766 Otherwise, any sort of write attempt will result in an error."),
4767 set_write_memory_permission, NULL,
4768 &setlist, &showlist);
4769
4770 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4771 &may_insert_breakpoints_1, _("\
4772 Set permission to insert breakpoints in the target."), _("\
4773 Show permission to insert breakpoints in the target."), _("\
4774 When this permission is on, GDB may insert breakpoints in the program.\n\
4775 Otherwise, any sort of insertion attempt will result in an error."),
4776 set_target_permissions, NULL,
4777 &setlist, &showlist);
4778
4779 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4780 &may_insert_tracepoints_1, _("\
4781 Set permission to insert tracepoints in the target."), _("\
4782 Show permission to insert tracepoints in the target."), _("\
4783 When this permission is on, GDB may insert tracepoints in the program.\n\
4784 Otherwise, any sort of insertion attempt will result in an error."),
4785 set_target_permissions, NULL,
4786 &setlist, &showlist);
4787
4788 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4789 &may_insert_fast_tracepoints_1, _("\
4790 Set permission to insert fast tracepoints in the target."), _("\
4791 Show permission to insert fast tracepoints in the target."), _("\
4792 When this permission is on, GDB may insert fast tracepoints.\n\
4793 Otherwise, any sort of insertion attempt will result in an error."),
4794 set_target_permissions, NULL,
4795 &setlist, &showlist);
4796
4797 add_setshow_boolean_cmd ("may-interrupt", class_support,
4798 &may_stop_1, _("\
4799 Set permission to interrupt or signal the target."), _("\
4800 Show permission to interrupt or signal the target."), _("\
4801 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4802 Otherwise, any attempt to interrupt or stop will be ignored."),
4803 set_target_permissions, NULL,
4804 &setlist, &showlist);
4805
4806
4807 target_dcache = dcache_init ();
4808 }