binutils-gdb.git: gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdb_wait.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 void target_ignore (void);
68
69 static void target_command (char *, int);
70
71 static struct target_ops *find_default_run_target (char *);
72
73 static LONGEST default_xfer_partial (struct target_ops *ops,
74 enum target_object object,
75 const char *annex, gdb_byte *readbuf,
76 const gdb_byte *writebuf,
77 ULONGEST offset, LONGEST len);
78
79 static LONGEST current_xfer_partial (struct target_ops *ops,
80 enum target_object object,
81 const char *annex, gdb_byte *readbuf,
82 const gdb_byte *writebuf,
83 ULONGEST offset, LONGEST len);
84
85 static LONGEST target_xfer_partial (struct target_ops *ops,
86 enum target_object object,
87 const char *annex,
88 void *readbuf, const void *writebuf,
89 ULONGEST offset, LONGEST len);
90
91 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
92 ptid_t ptid);
93
94 static void init_dummy_target (void);
95
96 static struct target_ops debug_target;
97
98 static void debug_to_open (char *, int);
99
100 static void debug_to_prepare_to_store (struct regcache *);
101
102 static void debug_to_files_info (struct target_ops *);
103
104 static int debug_to_insert_breakpoint (struct gdbarch *,
105 struct bp_target_info *);
106
107 static int debug_to_remove_breakpoint (struct gdbarch *,
108 struct bp_target_info *);
109
110 static int debug_to_can_use_hw_breakpoint (int, int, int);
111
112 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
116 struct bp_target_info *);
117
118 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
122 struct expression *);
123
124 static int debug_to_stopped_by_watchpoint (void);
125
126 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
127
128 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
129 CORE_ADDR, CORE_ADDR, int);
130
131 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
134 struct expression *);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_can_run (void);
151
152 static void debug_to_stop (ptid_t);
153
154 /* Pointer to array of target architecture structures; the size of the
155 array; the current index into the array; the allocated size of the
156 array. */
157 struct target_ops **target_structs;
158 unsigned target_struct_size;
159 unsigned target_struct_index;
160 unsigned target_struct_allocsize;
161 #define DEFAULT_ALLOCSIZE 10
162
163 /* The initial current target, so that there is always a semi-valid
164 current target. */
165
166 static struct target_ops dummy_target;
167
168 /* Top of target stack. */
169
170 static struct target_ops *target_stack;
171
172 /* The target structure we are currently using to talk to a process
173 or file or whatever "inferior" we have. */
174
175 struct target_ops current_target;
176
177 /* Command list for target. */
178
179 static struct cmd_list_element *targetlist = NULL;
180
181 /* Nonzero if we should trust readonly sections from the
182 executable when reading memory. */
183
184 static int trust_readonly = 0;
185
186 /* Nonzero if we should show true memory content including
187 memory breakpoints inserted by GDB. */
188
189 static int show_memory_breakpoints = 0;
190
191 /* These globals control whether GDB attempts to perform these
192 operations; they are useful for targets that need to prevent
193 inadvertent disruption, such as in non-stop mode. */
194
195 int may_write_registers = 1;
196
197 int may_write_memory = 1;
198
199 int may_insert_breakpoints = 1;
200
201 int may_insert_tracepoints = 1;
202
203 int may_insert_fast_tracepoints = 1;
204
205 int may_stop = 1;
206
207 /* Non-zero if we want to see trace of target level stuff. */
208
209 static int targetdebug = 0;
210 static void
211 show_targetdebug (struct ui_file *file, int from_tty,
212 struct cmd_list_element *c, const char *value)
213 {
214 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
215 }
216
217 static void setup_target_debug (void);
218
219 /* The option sets this. */
220 static int stack_cache_enabled_p_1 = 1;
221 /* And set_stack_cache_enabled_p updates this.
222 The reason for the separation is so that we don't flush the cache for
223 on->on transitions. */
224 static int stack_cache_enabled_p = 1;
225
226 /* This is called *after* the stack-cache has been set.
227 Flush the cache for off->on and on->off transitions.
228 There's no real need to flush the cache for on->off transitions,
229 except cleanliness. */
230
231 static void
232 set_stack_cache_enabled_p (char *args, int from_tty,
233 struct cmd_list_element *c)
234 {
235 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
236 target_dcache_invalidate ();
237
238 stack_cache_enabled_p = stack_cache_enabled_p_1;
239 }
240
241 static void
242 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
243 struct cmd_list_element *c, const char *value)
244 {
245 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
246 }
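
The two variables above follow a set/show pattern: the "set stack-cache" command writes stack_cache_enabled_p_1, and set_stack_cache_enabled_p then copies it into stack_cache_enabled_p, flushing the dcache only when the value actually changes. A standalone sketch of the same idea (invented names, not GDB code):

#include <stdio.h>

static int cache_enabled_option = 1;  /* written by the "set" command */
static int cache_enabled = 1;         /* consulted by the rest of the code */

static void
invalidate_cache (void)
{
  printf ("cache invalidated\n");
}

/* Called after the option variable has been updated; flush only on a
   real on->off or off->on transition, so an "on" while already "on"
   keeps the warm cache.  */
static void
apply_cache_option (void)
{
  if (cache_enabled != cache_enabled_option)
    invalidate_cache ();
  cache_enabled = cache_enabled_option;
}

int
main (void)
{
  cache_enabled_option = 1;
  apply_cache_option ();   /* on -> on: no flush */
  cache_enabled_option = 0;
  apply_cache_option ();   /* on -> off: flush */
  return 0;
}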
247
248 /* Cache of memory operations, to speed up remote access. */
249 static DCACHE *target_dcache;
250
251 /* Invalidate the target dcache. */
252
253 void
254 target_dcache_invalidate (void)
255 {
256 dcache_invalidate (target_dcache);
257 }
258
259 /* The user just typed 'target' without the name of a target. */
260
261 static void
262 target_command (char *arg, int from_tty)
263 {
264 fputs_filtered ("Argument required (target name). Try `help target'\n",
265 gdb_stdout);
266 }
267
268 /* Default target_has_* methods for process_stratum targets. */
269
270 int
271 default_child_has_all_memory (struct target_ops *ops)
272 {
273 /* If no inferior selected, then we can't read memory here. */
274 if (ptid_equal (inferior_ptid, null_ptid))
275 return 0;
276
277 return 1;
278 }
279
280 int
281 default_child_has_memory (struct target_ops *ops)
282 {
283 /* If no inferior selected, then we can't read memory here. */
284 if (ptid_equal (inferior_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290 int
291 default_child_has_stack (struct target_ops *ops)
292 {
293 /* If no inferior selected, there's no stack. */
294 if (ptid_equal (inferior_ptid, null_ptid))
295 return 0;
296
297 return 1;
298 }
299
300 int
301 default_child_has_registers (struct target_ops *ops)
302 {
303 /* Can't read registers from no inferior. */
304 if (ptid_equal (inferior_ptid, null_ptid))
305 return 0;
306
307 return 1;
308 }
309
310 int
311 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
312 {
313 /* If there's no thread selected, then we can't make it run through
314 hoops. */
315 if (ptid_equal (the_ptid, null_ptid))
316 return 0;
317
318 return 1;
319 }
320
321
322 int
323 target_has_all_memory_1 (void)
324 {
325 struct target_ops *t;
326
327 for (t = current_target.beneath; t != NULL; t = t->beneath)
328 if (t->to_has_all_memory (t))
329 return 1;
330
331 return 0;
332 }
333
334 int
335 target_has_memory_1 (void)
336 {
337 struct target_ops *t;
338
339 for (t = current_target.beneath; t != NULL; t = t->beneath)
340 if (t->to_has_memory (t))
341 return 1;
342
343 return 0;
344 }
345
346 int
347 target_has_stack_1 (void)
348 {
349 struct target_ops *t;
350
351 for (t = current_target.beneath; t != NULL; t = t->beneath)
352 if (t->to_has_stack (t))
353 return 1;
354
355 return 0;
356 }
357
358 int
359 target_has_registers_1 (void)
360 {
361 struct target_ops *t;
362
363 for (t = current_target.beneath; t != NULL; t = t->beneath)
364 if (t->to_has_registers (t))
365 return 1;
366
367 return 0;
368 }
369
370 int
371 target_has_execution_1 (ptid_t the_ptid)
372 {
373 struct target_ops *t;
374
375 for (t = current_target.beneath; t != NULL; t = t->beneath)
376 if (t->to_has_execution (t, the_ptid))
377 return 1;
378
379 return 0;
380 }
381
382 int
383 target_has_execution_current (void)
384 {
385 return target_has_execution_1 (inferior_ptid);
386 }
387
388 /* Add a possible target architecture to the list. */
389
390 void
391 add_target (struct target_ops *t)
392 {
393 /* Provide default values for all "must have" methods. */
394 if (t->to_xfer_partial == NULL)
395 t->to_xfer_partial = default_xfer_partial;
396
397 if (t->to_has_all_memory == NULL)
398 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
399
400 if (t->to_has_memory == NULL)
401 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
402
403 if (t->to_has_stack == NULL)
404 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
405
406 if (t->to_has_registers == NULL)
407 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
408
409 if (t->to_has_execution == NULL)
410 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
411
412 if (!target_structs)
413 {
414 target_struct_allocsize = DEFAULT_ALLOCSIZE;
415 target_structs = (struct target_ops **) xmalloc
416 (target_struct_allocsize * sizeof (*target_structs));
417 }
418 if (target_struct_size >= target_struct_allocsize)
419 {
420 target_struct_allocsize *= 2;
421 target_structs = (struct target_ops **)
422 xrealloc ((char *) target_structs,
423 target_struct_allocsize * sizeof (*target_structs));
424 }
425 target_structs[target_struct_size++] = t;
426
427 if (targetlist == NULL)
428 add_prefix_cmd ("target", class_run, target_command, _("\
429 Connect to a target machine or process.\n\
430 The first argument is the type or protocol of the target machine.\n\
431 Remaining arguments are interpreted by the target protocol. For more\n\
432 information on the arguments for a particular protocol, type\n\
433 `help target ' followed by the protocol name."),
434 &targetlist, "target ", 0, &cmdlist);
435 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
436 }
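
For illustration, a new target vector would be registered with add_target roughly as follows. The target name and functions are hypothetical, and the sketch assumes GDB's internal headers, since struct target_ops, OPS_MAGIC and add_target only exist inside the GDB tree:

#include "defs.h"
#include "target.h"

static struct target_ops example_ops;

static void
example_open (char *args, int from_tty)
{
  /* A real implementation would establish the connection here and
     then call push_target (&example_ops).  */
}

static void
init_example_ops (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Hypothetical example target";
  example_ops.to_doc = "Connect to a hypothetical example target.";
  example_ops.to_open = example_open;
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;
}

void
_initialize_example_target (void)
{
  init_example_ops ();
  add_target (&example_ops);
}

add_target fills in the remaining "must have" methods with the defaults shown above and creates the "target example" subcommand from to_shortname and to_doc.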
437
438 /* Stub functions */
439
440 void
441 target_ignore (void)
442 {
443 }
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
463 void
464 target_load (char *arg, int from_tty)
465 {
466 target_dcache_invalidate ();
467 (*current_target.to_load) (arg, from_tty);
468 }
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
493 void
494 target_terminal_inferior (void)
495 {
496 /* A background resume (``run&'') should leave GDB in control of the
497 terminal. Use target_can_async_p, not target_is_async_p, since at
498 this point the target is not async yet. However, if sync_execution
499 is not set, we know it will become async prior to resume. */
500 if (target_can_async_p () && !sync_execution)
501 return;
502
503 /* If GDB is resuming the inferior in the foreground, install
504 inferior's terminal modes. */
505 (*current_target.to_terminal_inferior) ();
506 }
507
508 static int
509 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
510 struct target_ops *t)
511 {
512 errno = EIO; /* Can't read/write this location. */
513 return 0; /* No bytes handled. */
514 }
515
516 static void
517 tcomplain (void)
518 {
519 error (_("You can't do that when your target is `%s'"),
520 current_target.to_shortname);
521 }
522
523 void
524 noprocess (void)
525 {
526 error (_("You can't do that without a process to debug."));
527 }
528
529 static void
530 default_terminal_info (char *args, int from_tty)
531 {
532 printf_unfiltered (_("No saved terminal information.\n"));
533 }
534
535 /* A default implementation for the to_get_ada_task_ptid target method.
536
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
539 inferior_ptid. */
540
541 static ptid_t
542 default_get_ada_task_ptid (long lwp, long tid)
543 {
544 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
545 }
546
547 static enum exec_direction_kind
548 default_execution_direction (void)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
562 vectors.
563
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
567 Consequently, new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
569 request. */
570
571 static void
572 update_current_target (void)
573 {
574 struct target_ops *t;
575
576 /* First, reset current's contents. */
577 memset (&current_target, 0, sizeof (current_target));
578
579 #define INHERIT(FIELD, TARGET) \
580 if (!current_target.FIELD) \
581 current_target.FIELD = (TARGET)->FIELD
582
583 for (t = target_stack; t; t = t->beneath)
584 {
585 INHERIT (to_shortname, t);
586 INHERIT (to_longname, t);
587 INHERIT (to_doc, t);
588 /* Do not inherit to_open. */
589 /* Do not inherit to_close. */
590 /* Do not inherit to_attach. */
591 INHERIT (to_post_attach, t);
592 INHERIT (to_attach_no_wait, t);
593 /* Do not inherit to_detach. */
594 /* Do not inherit to_disconnect. */
595 /* Do not inherit to_resume. */
596 /* Do not inherit to_wait. */
597 /* Do not inherit to_fetch_registers. */
598 /* Do not inherit to_store_registers. */
599 INHERIT (to_prepare_to_store, t);
600 INHERIT (deprecated_xfer_memory, t);
601 INHERIT (to_files_info, t);
602 INHERIT (to_insert_breakpoint, t);
603 INHERIT (to_remove_breakpoint, t);
604 INHERIT (to_can_use_hw_breakpoint, t);
605 INHERIT (to_insert_hw_breakpoint, t);
606 INHERIT (to_remove_hw_breakpoint, t);
607 /* Do not inherit to_ranged_break_num_registers. */
608 INHERIT (to_insert_watchpoint, t);
609 INHERIT (to_remove_watchpoint, t);
610 /* Do not inherit to_insert_mask_watchpoint. */
611 /* Do not inherit to_remove_mask_watchpoint. */
612 INHERIT (to_stopped_data_address, t);
613 INHERIT (to_have_steppable_watchpoint, t);
614 INHERIT (to_have_continuable_watchpoint, t);
615 INHERIT (to_stopped_by_watchpoint, t);
616 INHERIT (to_watchpoint_addr_within_range, t);
617 INHERIT (to_region_ok_for_hw_watchpoint, t);
618 INHERIT (to_can_accel_watchpoint_condition, t);
619 /* Do not inherit to_masked_watch_num_registers. */
620 INHERIT (to_terminal_init, t);
621 INHERIT (to_terminal_inferior, t);
622 INHERIT (to_terminal_ours_for_output, t);
623 INHERIT (to_terminal_ours, t);
624 INHERIT (to_terminal_save_ours, t);
625 INHERIT (to_terminal_info, t);
626 /* Do not inherit to_kill. */
627 INHERIT (to_load, t);
628 /* Do not inherit to_create_inferior. */
629 INHERIT (to_post_startup_inferior, t);
630 INHERIT (to_insert_fork_catchpoint, t);
631 INHERIT (to_remove_fork_catchpoint, t);
632 INHERIT (to_insert_vfork_catchpoint, t);
633 INHERIT (to_remove_vfork_catchpoint, t);
634 /* Do not inherit to_follow_fork. */
635 INHERIT (to_insert_exec_catchpoint, t);
636 INHERIT (to_remove_exec_catchpoint, t);
637 INHERIT (to_set_syscall_catchpoint, t);
638 INHERIT (to_has_exited, t);
639 /* Do not inherit to_mourn_inferior. */
640 INHERIT (to_can_run, t);
641 /* Do not inherit to_pass_signals. */
642 /* Do not inherit to_program_signals. */
643 /* Do not inherit to_thread_alive. */
644 /* Do not inherit to_find_new_threads. */
645 /* Do not inherit to_pid_to_str. */
646 INHERIT (to_extra_thread_info, t);
647 INHERIT (to_thread_name, t);
648 INHERIT (to_stop, t);
649 /* Do not inherit to_xfer_partial. */
650 INHERIT (to_rcmd, t);
651 INHERIT (to_pid_to_exec_file, t);
652 INHERIT (to_log_command, t);
653 INHERIT (to_stratum, t);
654 /* Do not inherit to_has_all_memory. */
655 /* Do not inherit to_has_memory. */
656 /* Do not inherit to_has_stack. */
657 /* Do not inherit to_has_registers. */
658 /* Do not inherit to_has_execution. */
659 INHERIT (to_has_thread_control, t);
660 INHERIT (to_can_async_p, t);
661 INHERIT (to_is_async_p, t);
662 INHERIT (to_async, t);
663 INHERIT (to_find_memory_regions, t);
664 INHERIT (to_make_corefile_notes, t);
665 INHERIT (to_get_bookmark, t);
666 INHERIT (to_goto_bookmark, t);
667 /* Do not inherit to_get_thread_local_address. */
668 INHERIT (to_can_execute_reverse, t);
669 INHERIT (to_execution_direction, t);
670 INHERIT (to_thread_architecture, t);
671 /* Do not inherit to_read_description. */
672 INHERIT (to_get_ada_task_ptid, t);
673 /* Do not inherit to_search_memory. */
674 INHERIT (to_supports_multi_process, t);
675 INHERIT (to_supports_enable_disable_tracepoint, t);
676 INHERIT (to_supports_string_tracing, t);
677 INHERIT (to_trace_init, t);
678 INHERIT (to_download_tracepoint, t);
679 INHERIT (to_can_download_tracepoint, t);
680 INHERIT (to_download_trace_state_variable, t);
681 INHERIT (to_enable_tracepoint, t);
682 INHERIT (to_disable_tracepoint, t);
683 INHERIT (to_trace_set_readonly_regions, t);
684 INHERIT (to_trace_start, t);
685 INHERIT (to_get_trace_status, t);
686 INHERIT (to_get_tracepoint_status, t);
687 INHERIT (to_trace_stop, t);
688 INHERIT (to_trace_find, t);
689 INHERIT (to_get_trace_state_variable_value, t);
690 INHERIT (to_save_trace_data, t);
691 INHERIT (to_upload_tracepoints, t);
692 INHERIT (to_upload_trace_state_variables, t);
693 INHERIT (to_get_raw_trace_data, t);
694 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
695 INHERIT (to_set_disconnected_tracing, t);
696 INHERIT (to_set_circular_trace_buffer, t);
697 INHERIT (to_set_trace_notes, t);
698 INHERIT (to_get_tib_address, t);
699 INHERIT (to_set_permissions, t);
700 INHERIT (to_static_tracepoint_marker_at, t);
701 INHERIT (to_static_tracepoint_markers_by_strid, t);
702 INHERIT (to_traceframe_info, t);
703 INHERIT (to_use_agent, t);
704 INHERIT (to_can_use_agent, t);
705 INHERIT (to_magic, t);
706 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
707 /* Do not inherit to_memory_map. */
708 /* Do not inherit to_flash_erase. */
709 /* Do not inherit to_flash_done. */
710 }
711 #undef INHERIT
712
713 /* Clean up a target struct so it no longer has any zero pointers in
714 it. Some entries are defaulted to a method that prints an error,
715 others are hard-wired to a standard recursive default. */
716
717 #define de_fault(field, value) \
718 if (!current_target.field) \
719 current_target.field = value
720
721 de_fault (to_open,
722 (void (*) (char *, int))
723 tcomplain);
724 de_fault (to_close,
725 (void (*) (int))
726 target_ignore);
727 de_fault (to_post_attach,
728 (void (*) (int))
729 target_ignore);
730 de_fault (to_prepare_to_store,
731 (void (*) (struct regcache *))
732 noprocess);
733 de_fault (deprecated_xfer_memory,
734 (int (*) (CORE_ADDR, gdb_byte *, int, int,
735 struct mem_attrib *, struct target_ops *))
736 nomemory);
737 de_fault (to_files_info,
738 (void (*) (struct target_ops *))
739 target_ignore);
740 de_fault (to_insert_breakpoint,
741 memory_insert_breakpoint);
742 de_fault (to_remove_breakpoint,
743 memory_remove_breakpoint);
744 de_fault (to_can_use_hw_breakpoint,
745 (int (*) (int, int, int))
746 return_zero);
747 de_fault (to_insert_hw_breakpoint,
748 (int (*) (struct gdbarch *, struct bp_target_info *))
749 return_minus_one);
750 de_fault (to_remove_hw_breakpoint,
751 (int (*) (struct gdbarch *, struct bp_target_info *))
752 return_minus_one);
753 de_fault (to_insert_watchpoint,
754 (int (*) (CORE_ADDR, int, int, struct expression *))
755 return_minus_one);
756 de_fault (to_remove_watchpoint,
757 (int (*) (CORE_ADDR, int, int, struct expression *))
758 return_minus_one);
759 de_fault (to_stopped_by_watchpoint,
760 (int (*) (void))
761 return_zero);
762 de_fault (to_stopped_data_address,
763 (int (*) (struct target_ops *, CORE_ADDR *))
764 return_zero);
765 de_fault (to_watchpoint_addr_within_range,
766 default_watchpoint_addr_within_range);
767 de_fault (to_region_ok_for_hw_watchpoint,
768 default_region_ok_for_hw_watchpoint);
769 de_fault (to_can_accel_watchpoint_condition,
770 (int (*) (CORE_ADDR, int, int, struct expression *))
771 return_zero);
772 de_fault (to_terminal_init,
773 (void (*) (void))
774 target_ignore);
775 de_fault (to_terminal_inferior,
776 (void (*) (void))
777 target_ignore);
778 de_fault (to_terminal_ours_for_output,
779 (void (*) (void))
780 target_ignore);
781 de_fault (to_terminal_ours,
782 (void (*) (void))
783 target_ignore);
784 de_fault (to_terminal_save_ours,
785 (void (*) (void))
786 target_ignore);
787 de_fault (to_terminal_info,
788 default_terminal_info);
789 de_fault (to_load,
790 (void (*) (char *, int))
791 tcomplain);
792 de_fault (to_post_startup_inferior,
793 (void (*) (ptid_t))
794 target_ignore);
795 de_fault (to_insert_fork_catchpoint,
796 (int (*) (int))
797 return_one);
798 de_fault (to_remove_fork_catchpoint,
799 (int (*) (int))
800 return_one);
801 de_fault (to_insert_vfork_catchpoint,
802 (int (*) (int))
803 return_one);
804 de_fault (to_remove_vfork_catchpoint,
805 (int (*) (int))
806 return_one);
807 de_fault (to_insert_exec_catchpoint,
808 (int (*) (int))
809 return_one);
810 de_fault (to_remove_exec_catchpoint,
811 (int (*) (int))
812 return_one);
813 de_fault (to_set_syscall_catchpoint,
814 (int (*) (int, int, int, int, int *))
815 return_one);
816 de_fault (to_has_exited,
817 (int (*) (int, int, int *))
818 return_zero);
819 de_fault (to_can_run,
820 return_zero);
821 de_fault (to_extra_thread_info,
822 (char *(*) (struct thread_info *))
823 return_zero);
824 de_fault (to_thread_name,
825 (char *(*) (struct thread_info *))
826 return_zero);
827 de_fault (to_stop,
828 (void (*) (ptid_t))
829 target_ignore);
830 current_target.to_xfer_partial = current_xfer_partial;
831 de_fault (to_rcmd,
832 (void (*) (char *, struct ui_file *))
833 tcomplain);
834 de_fault (to_pid_to_exec_file,
835 (char *(*) (int))
836 return_zero);
837 de_fault (to_async,
838 (void (*) (void (*) (enum inferior_event_type, void*), void*))
839 tcomplain);
840 de_fault (to_thread_architecture,
841 default_thread_architecture);
842 current_target.to_read_description = NULL;
843 de_fault (to_get_ada_task_ptid,
844 (ptid_t (*) (long, long))
845 default_get_ada_task_ptid);
846 de_fault (to_supports_multi_process,
847 (int (*) (void))
848 return_zero);
849 de_fault (to_supports_enable_disable_tracepoint,
850 (int (*) (void))
851 return_zero);
852 de_fault (to_supports_string_tracing,
853 (int (*) (void))
854 return_zero);
855 de_fault (to_trace_init,
856 (void (*) (void))
857 tcomplain);
858 de_fault (to_download_tracepoint,
859 (void (*) (struct bp_location *))
860 tcomplain);
861 de_fault (to_can_download_tracepoint,
862 (int (*) (void))
863 return_zero);
864 de_fault (to_download_trace_state_variable,
865 (void (*) (struct trace_state_variable *))
866 tcomplain);
867 de_fault (to_enable_tracepoint,
868 (void (*) (struct bp_location *))
869 tcomplain);
870 de_fault (to_disable_tracepoint,
871 (void (*) (struct bp_location *))
872 tcomplain);
873 de_fault (to_trace_set_readonly_regions,
874 (void (*) (void))
875 tcomplain);
876 de_fault (to_trace_start,
877 (void (*) (void))
878 tcomplain);
879 de_fault (to_get_trace_status,
880 (int (*) (struct trace_status *))
881 return_minus_one);
882 de_fault (to_get_tracepoint_status,
883 (void (*) (struct breakpoint *, struct uploaded_tp *))
884 tcomplain);
885 de_fault (to_trace_stop,
886 (void (*) (void))
887 tcomplain);
888 de_fault (to_trace_find,
889 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
890 return_minus_one);
891 de_fault (to_get_trace_state_variable_value,
892 (int (*) (int, LONGEST *))
893 return_zero);
894 de_fault (to_save_trace_data,
895 (int (*) (const char *))
896 tcomplain);
897 de_fault (to_upload_tracepoints,
898 (int (*) (struct uploaded_tp **))
899 return_zero);
900 de_fault (to_upload_trace_state_variables,
901 (int (*) (struct uploaded_tsv **))
902 return_zero);
903 de_fault (to_get_raw_trace_data,
904 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
905 tcomplain);
906 de_fault (to_get_min_fast_tracepoint_insn_len,
907 (int (*) (void))
908 return_minus_one);
909 de_fault (to_set_disconnected_tracing,
910 (void (*) (int))
911 target_ignore);
912 de_fault (to_set_circular_trace_buffer,
913 (void (*) (int))
914 target_ignore);
915 de_fault (to_set_trace_notes,
916 (int (*) (char *, char *, char *))
917 return_zero);
918 de_fault (to_get_tib_address,
919 (int (*) (ptid_t, CORE_ADDR *))
920 tcomplain);
921 de_fault (to_set_permissions,
922 (void (*) (void))
923 target_ignore);
924 de_fault (to_static_tracepoint_marker_at,
925 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
926 return_zero);
927 de_fault (to_static_tracepoint_markers_by_strid,
928 (VEC(static_tracepoint_marker_p) * (*) (const char *))
929 tcomplain);
930 de_fault (to_traceframe_info,
931 (struct traceframe_info * (*) (void))
932 tcomplain);
933 de_fault (to_supports_evaluation_of_breakpoint_conditions,
934 (int (*) (void))
935 return_zero);
936 de_fault (to_use_agent,
937 (int (*) (int))
938 tcomplain);
939 de_fault (to_can_use_agent,
940 (int (*) (void))
941 return_zero);
942 de_fault (to_execution_direction, default_execution_direction);
943
944 #undef de_fault
945
946 /* Finally, position the target-stack beneath the squashed
947 "current_target". That way code looking for a non-inherited
948 target method can quickly and simply find it. */
949 current_target.beneath = target_stack;
950
951 if (targetdebug)
952 setup_target_debug ();
953 }
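
To make the INHERIT and de_fault macros concrete, here is what they expand to for a single field (to_files_info); this is just the expansion of the macros above, spelled out:

/* INHERIT (to_files_info, t) expands to:  */
if (!current_target.to_files_info)
  current_target.to_files_info = (t)->to_files_info;

/* de_fault (to_files_info,
             (void (*) (struct target_ops *)) target_ignore)
   expands to:  */
if (!current_target.to_files_info)
  current_target.to_files_info
    = (void (*) (struct target_ops *)) target_ignore;

So each field of current_target ends up holding the topmost pushed target's implementation, or the listed default when no pushed target provides one.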
954
955 /* Push a new target type into the stack of the existing target accessors,
956 possibly superseding some of the existing accessors.
957
958 Rather than allow an empty stack, we always have the dummy target at
959 the bottom stratum, so we can call the function vectors without
960 checking them. */
961
962 void
963 push_target (struct target_ops *t)
964 {
965 struct target_ops **cur;
966
967 /* Check magic number. If wrong, it probably means someone changed
968 the struct definition, but not all the places that initialize one. */
969 if (t->to_magic != OPS_MAGIC)
970 {
971 fprintf_unfiltered (gdb_stderr,
972 "Magic number of %s target struct wrong\n",
973 t->to_shortname);
974 internal_error (__FILE__, __LINE__,
975 _("failed internal consistency check"));
976 }
977
978 /* Find the proper stratum to install this target in. */
979 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
980 {
981 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
982 break;
983 }
984
985 /* If there are already targets at this stratum, remove them. */
986 /* FIXME: cagney/2003-10-15: I think this should be popping all
987 targets to CUR, and not just those at this stratum level. */
988 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
989 {
990 /* There's already something at this stratum level. Close it,
991 and un-hook it from the stack. */
992 struct target_ops *tmp = (*cur);
993
994 (*cur) = (*cur)->beneath;
995 tmp->beneath = NULL;
996 target_close (tmp, 0);
997 }
998
999 /* We have removed all targets in our stratum, now add the new one. */
1000 t->beneath = (*cur);
1001 (*cur) = t;
1002
1003 update_current_target ();
1004 }
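
The stratum-ordered insertion in push_target can be reduced to a standalone sketch (simplified names and types; the displaced entry is simply dropped here instead of being closed as the real code does):

#include <stdio.h>

struct layer
{
  int stratum;
  const char *name;
  struct layer *beneath;
};

static struct layer *stack;

/* Insert L so that strata decrease from the top of the stack,
   removing any layer already at the same stratum.  */
static void
push_layer (struct layer *l)
{
  struct layer **cur;

  for (cur = &stack; *cur != NULL; cur = &(*cur)->beneath)
    if (l->stratum >= (*cur)->stratum)
      break;

  while (*cur != NULL && (*cur)->stratum == l->stratum)
    *cur = (*cur)->beneath;

  l->beneath = *cur;
  *cur = l;
}

int
main (void)
{
  struct layer file = { 1, "exec", NULL };
  struct layer proc = { 2, "remote", NULL };
  struct layer *l;

  push_layer (&file);
  push_layer (&proc);   /* higher stratum ends up on top */

  for (l = stack; l != NULL; l = l->beneath)
    printf ("%s (stratum %d)\n", l->name, l->stratum);
  return 0;
}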
1005
1006 /* Remove a target_ops vector from the stack, wherever it may be.
1007 Return how many times it was removed (0 or 1). */
1008
1009 int
1010 unpush_target (struct target_ops *t)
1011 {
1012 struct target_ops **cur;
1013 struct target_ops *tmp;
1014
1015 if (t->to_stratum == dummy_stratum)
1016 internal_error (__FILE__, __LINE__,
1017 _("Attempt to unpush the dummy target"));
1018
1019 /* Look for the specified target. Note that we assume that a target
1020 can only occur once in the target stack. */
1021
1022 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1023 {
1024 if ((*cur) == t)
1025 break;
1026 }
1027
1028 /* If we don't find target_ops, quit. Only open targets should be
1029 closed. */
1030 if ((*cur) == NULL)
1031 return 0;
1032
1033 /* Unchain the target. */
1034 tmp = (*cur);
1035 (*cur) = (*cur)->beneath;
1036 tmp->beneath = NULL;
1037
1038 update_current_target ();
1039
1040 /* Finally close the target. Note we do this after unchaining, so
1041 any target method calls from within the target_close
1042 implementation don't end up in T anymore. */
1043 target_close (t, 0);
1044
1045 return 1;
1046 }
1047
1048 void
1049 pop_target (void)
1050 {
1051 target_close (target_stack, 0); /* Let it clean up. */
1052 if (unpush_target (target_stack) == 1)
1053 return;
1054
1055 fprintf_unfiltered (gdb_stderr,
1056 "pop_target couldn't find target %s\n",
1057 current_target.to_shortname);
1058 internal_error (__FILE__, __LINE__,
1059 _("failed internal consistency check"));
1060 }
1061
1062 void
1063 pop_all_targets_above (enum strata above_stratum, int quitting)
1064 {
1065 while ((int) (current_target.to_stratum) > (int) above_stratum)
1066 {
1067 target_close (target_stack, quitting);
1068 if (!unpush_target (target_stack))
1069 {
1070 fprintf_unfiltered (gdb_stderr,
1071 "pop_all_targets couldn't find target %s\n",
1072 target_stack->to_shortname);
1073 internal_error (__FILE__, __LINE__,
1074 _("failed internal consistency check"));
1075 break;
1076 }
1077 }
1078 }
1079
1080 void
1081 pop_all_targets (int quitting)
1082 {
1083 pop_all_targets_above (dummy_stratum, quitting);
1084 }
1085
1086 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1087
1088 int
1089 target_is_pushed (struct target_ops *t)
1090 {
1091 struct target_ops **cur;
1092
1093 /* Check magic number. If wrong, it probably means someone changed
1094 the struct definition, but not all the places that initialize one. */
1095 if (t->to_magic != OPS_MAGIC)
1096 {
1097 fprintf_unfiltered (gdb_stderr,
1098 "Magic number of %s target struct wrong\n",
1099 t->to_shortname);
1100 internal_error (__FILE__, __LINE__,
1101 _("failed internal consistency check"));
1102 }
1103
1104 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1105 if (*cur == t)
1106 return 1;
1107
1108 return 0;
1109 }
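
A typical caller pairs target_is_pushed with push_target so the same vector is never pushed twice; a hedged sketch, reusing the hypothetical example_ops vector from the earlier registration sketch:

/* Inside example_open, once the connection is up:  */
if (!target_is_pushed (&example_ops))
  push_target (&example_ops);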
1110
1111 /* Using the objfile specified in OBJFILE, find the address for the
1112 current thread's thread-local storage with offset OFFSET. */
1113 CORE_ADDR
1114 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1115 {
1116 volatile CORE_ADDR addr = 0;
1117 struct target_ops *target;
1118
1119 for (target = current_target.beneath;
1120 target != NULL;
1121 target = target->beneath)
1122 {
1123 if (target->to_get_thread_local_address != NULL)
1124 break;
1125 }
1126
1127 if (target != NULL
1128 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
1129 {
1130 ptid_t ptid = inferior_ptid;
1131 volatile struct gdb_exception ex;
1132
1133 TRY_CATCH (ex, RETURN_MASK_ALL)
1134 {
1135 CORE_ADDR lm_addr;
1136
1137 /* Fetch the load module address for this objfile. */
1138 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1139 objfile);
1140 /* If it's 0, throw the appropriate exception. */
1141 if (lm_addr == 0)
1142 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1143 _("TLS load module not found"));
1144
1145 addr = target->to_get_thread_local_address (target, ptid,
1146 lm_addr, offset);
1147 }
1148 /* If an error occurred, print TLS related messages here. Otherwise,
1149 throw the error to some higher catcher. */
1150 if (ex.reason < 0)
1151 {
1152 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1153
1154 switch (ex.error)
1155 {
1156 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1157 error (_("Cannot find thread-local variables "
1158 "in this thread library."));
1159 break;
1160 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1161 if (objfile_is_library)
1162 error (_("Cannot find shared library `%s' in dynamic"
1163 " linker's load module list"), objfile->name);
1164 else
1165 error (_("Cannot find executable file `%s' in dynamic"
1166 " linker's load module list"), objfile->name);
1167 break;
1168 case TLS_NOT_ALLOCATED_YET_ERROR:
1169 if (objfile_is_library)
1170 error (_("The inferior has not yet allocated storage for"
1171 " thread-local variables in\n"
1172 "the shared library `%s'\n"
1173 "for %s"),
1174 objfile->name, target_pid_to_str (ptid));
1175 else
1176 error (_("The inferior has not yet allocated storage for"
1177 " thread-local variables in\n"
1178 "the executable `%s'\n"
1179 "for %s"),
1180 objfile->name, target_pid_to_str (ptid));
1181 break;
1182 case TLS_GENERIC_ERROR:
1183 if (objfile_is_library)
1184 error (_("Cannot find thread-local storage for %s, "
1185 "shared library %s:\n%s"),
1186 target_pid_to_str (ptid),
1187 objfile->name, ex.message);
1188 else
1189 error (_("Cannot find thread-local storage for %s, "
1190 "executable file %s:\n%s"),
1191 target_pid_to_str (ptid),
1192 objfile->name, ex.message);
1193 break;
1194 default:
1195 throw_exception (ex);
1196 break;
1197 }
1198 }
1199 }
1200 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1201 TLS is an ABI-specific thing. But we don't do that yet. */
1202 else
1203 error (_("Cannot find thread-local variables on this target"));
1204
1205 return addr;
1206 }
1207
1208 #undef MIN
1209 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1210
1211 /* target_read_string -- read a null terminated string, up to LEN bytes,
1212 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1213 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1214 is responsible for freeing it. Return the number of bytes successfully
1215 read. */
1216
1217 int
1218 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1219 {
1220 int tlen, origlen, offset, i;
1221 gdb_byte buf[4];
1222 int errcode = 0;
1223 char *buffer;
1224 int buffer_allocated;
1225 char *bufptr;
1226 unsigned int nbytes_read = 0;
1227
1228 gdb_assert (string);
1229
1230 /* Small for testing. */
1231 buffer_allocated = 4;
1232 buffer = xmalloc (buffer_allocated);
1233 bufptr = buffer;
1234
1235 origlen = len;
1236
1237 while (len > 0)
1238 {
1239 tlen = MIN (len, 4 - (memaddr & 3));
1240 offset = memaddr & 3;
1241
1242 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1243 if (errcode != 0)
1244 {
1245 /* The transfer request might have crossed the boundary to an
1246 unallocated region of memory. Retry the transfer, requesting
1247 a single byte. */
1248 tlen = 1;
1249 offset = 0;
1250 errcode = target_read_memory (memaddr, buf, 1);
1251 if (errcode != 0)
1252 goto done;
1253 }
1254
1255 if (bufptr - buffer + tlen > buffer_allocated)
1256 {
1257 unsigned int bytes;
1258
1259 bytes = bufptr - buffer;
1260 buffer_allocated *= 2;
1261 buffer = xrealloc (buffer, buffer_allocated);
1262 bufptr = buffer + bytes;
1263 }
1264
1265 for (i = 0; i < tlen; i++)
1266 {
1267 *bufptr++ = buf[i + offset];
1268 if (buf[i + offset] == '\000')
1269 {
1270 nbytes_read += i + 1;
1271 goto done;
1272 }
1273 }
1274
1275 memaddr += tlen;
1276 len -= tlen;
1277 nbytes_read += tlen;
1278 }
1279 done:
1280 *string = buffer;
1281 if (errnop != NULL)
1282 *errnop = errcode;
1283 return nbytes_read;
1284 }
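
A hedged sketch of a typical target_read_string call; ADDR is an assumed CORE_ADDR pointing at a NUL-terminated string in the inferior:

char *str = NULL;
int err = 0;
int nread;

/* Read at most 200 bytes of the string at ADDR.  */
nread = target_read_string (addr, &str, 200, &err);
if (err != 0)
  printf_filtered (_("read stopped after %d bytes (errno %d)\n"),
                   nread, err);
else
  printf_filtered (_("string: %s\n"), str);

/* The buffer is malloc'd by target_read_string even on error;
   the caller frees it.  */
xfree (str);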
1285
1286 struct target_section_table *
1287 target_get_section_table (struct target_ops *target)
1288 {
1289 struct target_ops *t;
1290
1291 if (targetdebug)
1292 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1293
1294 for (t = target; t != NULL; t = t->beneath)
1295 if (t->to_get_section_table != NULL)
1296 return (*t->to_get_section_table) (t);
1297
1298 return NULL;
1299 }
1300
1301 /* Find a section containing ADDR. */
1302
1303 struct target_section *
1304 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1305 {
1306 struct target_section_table *table = target_get_section_table (target);
1307 struct target_section *secp;
1308
1309 if (table == NULL)
1310 return NULL;
1311
1312 for (secp = table->sections; secp < table->sections_end; secp++)
1313 {
1314 if (addr >= secp->addr && addr < secp->endaddr)
1315 return secp;
1316 }
1317 return NULL;
1318 }
1319
1320 /* Read memory from the live target, even if currently inspecting a
1321 traceframe. The return is the same as that of target_read. */
1322
1323 static LONGEST
1324 target_read_live_memory (enum target_object object,
1325 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1326 {
1327 int ret;
1328 struct cleanup *cleanup;
1329
1330 /* Switch momentarily out of tfind mode so as to access live memory.
1331 Note that this must not clear global state, such as the frame
1332 cache, which must still remain valid for the previous traceframe.
1333 We may be _building_ the frame cache at this point. */
1334 cleanup = make_cleanup_restore_traceframe_number ();
1335 set_traceframe_number (-1);
1336
1337 ret = target_read (current_target.beneath, object, NULL,
1338 myaddr, memaddr, len);
1339
1340 do_cleanups (cleanup);
1341 return ret;
1342 }
1343
1344 /* Using the set of read-only target sections of OPS, read live
1345 read-only memory. Note that the actual reads start from the
1346 top-most target again.
1347
1348 For interface/parameters/return description see target.h,
1349 to_xfer_partial. */
1350
1351 static LONGEST
1352 memory_xfer_live_readonly_partial (struct target_ops *ops,
1353 enum target_object object,
1354 gdb_byte *readbuf, ULONGEST memaddr,
1355 LONGEST len)
1356 {
1357 struct target_section *secp;
1358 struct target_section_table *table;
1359
1360 secp = target_section_by_addr (ops, memaddr);
1361 if (secp != NULL
1362 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1363 & SEC_READONLY))
1364 {
1365 struct target_section *p;
1366 ULONGEST memend = memaddr + len;
1367
1368 table = target_get_section_table (ops);
1369
1370 for (p = table->sections; p < table->sections_end; p++)
1371 {
1372 if (memaddr >= p->addr)
1373 {
1374 if (memend <= p->endaddr)
1375 {
1376 /* Entire transfer is within this section. */
1377 return target_read_live_memory (object, memaddr,
1378 readbuf, len);
1379 }
1380 else if (memaddr >= p->endaddr)
1381 {
1382 /* This section ends before the transfer starts. */
1383 continue;
1384 }
1385 else
1386 {
1387 /* This section overlaps the transfer. Just do half. */
1388 len = p->endaddr - memaddr;
1389 return target_read_live_memory (object, memaddr,
1390 readbuf, len);
1391 }
1392 }
1393 }
1394 }
1395
1396 return 0;
1397 }
1398
1399 /* Perform a partial memory transfer.
1400 For docs see target.h, to_xfer_partial. */
1401
1402 static LONGEST
1403 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1404 void *readbuf, const void *writebuf, ULONGEST memaddr,
1405 LONGEST len)
1406 {
1407 LONGEST res;
1408 int reg_len;
1409 struct mem_region *region;
1410 struct inferior *inf;
1411
1412 /* For accesses to unmapped overlay sections, read directly from
1413 files. Must do this first, as MEMADDR may need adjustment. */
1414 if (readbuf != NULL && overlay_debugging)
1415 {
1416 struct obj_section *section = find_pc_overlay (memaddr);
1417
1418 if (pc_in_unmapped_range (memaddr, section))
1419 {
1420 struct target_section_table *table
1421 = target_get_section_table (ops);
1422 const char *section_name = section->the_bfd_section->name;
1423
1424 memaddr = overlay_mapped_address (memaddr, section);
1425 return section_table_xfer_memory_partial (readbuf, writebuf,
1426 memaddr, len,
1427 table->sections,
1428 table->sections_end,
1429 section_name);
1430 }
1431 }
1432
1433 /* Try the executable files, if "trust-readonly-sections" is set. */
1434 if (readbuf != NULL && trust_readonly)
1435 {
1436 struct target_section *secp;
1437 struct target_section_table *table;
1438
1439 secp = target_section_by_addr (ops, memaddr);
1440 if (secp != NULL
1441 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1442 & SEC_READONLY))
1443 {
1444 table = target_get_section_table (ops);
1445 return section_table_xfer_memory_partial (readbuf, writebuf,
1446 memaddr, len,
1447 table->sections,
1448 table->sections_end,
1449 NULL);
1450 }
1451 }
1452
1453 /* If reading unavailable memory in the context of traceframes, and
1454 this address falls within a read-only section, fall back to
1455 reading from live memory. */
1456 if (readbuf != NULL && get_traceframe_number () != -1)
1457 {
1458 VEC(mem_range_s) *available;
1459
1460 /* If we fail to get the set of available memory, then the
1461 target does not support querying traceframe info, and so we
1462 attempt reading from the traceframe anyway (assuming the
1463 target implements the old QTro packet then). */
1464 if (traceframe_available_memory (&available, memaddr, len))
1465 {
1466 struct cleanup *old_chain;
1467
1468 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1469
1470 if (VEC_empty (mem_range_s, available)
1471 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1472 {
1473 /* Don't read into the traceframe's available
1474 memory. */
1475 if (!VEC_empty (mem_range_s, available))
1476 {
1477 LONGEST oldlen = len;
1478
1479 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1480 gdb_assert (len <= oldlen);
1481 }
1482
1483 do_cleanups (old_chain);
1484
1485 /* This goes through the topmost target again. */
1486 res = memory_xfer_live_readonly_partial (ops, object,
1487 readbuf, memaddr, len);
1488 if (res > 0)
1489 return res;
1490
1491 /* No use trying further, we know some memory starting
1492 at MEMADDR isn't available. */
1493 return -1;
1494 }
1495
1496 /* Don't try to read more than how much is available, in
1497 case the target implements the deprecated QTro packet to
1498 cater for older GDBs (the target's knowledge of read-only
1499 sections may be outdated by now). */
1500 len = VEC_index (mem_range_s, available, 0)->length;
1501
1502 do_cleanups (old_chain);
1503 }
1504 }
1505
1506 /* Try GDB's internal data cache. */
1507 region = lookup_mem_region (memaddr);
1508 /* region->hi == 0 means there's no upper bound. */
1509 if (memaddr + len < region->hi || region->hi == 0)
1510 reg_len = len;
1511 else
1512 reg_len = region->hi - memaddr;
1513
1514 switch (region->attrib.mode)
1515 {
1516 case MEM_RO:
1517 if (writebuf != NULL)
1518 return -1;
1519 break;
1520
1521 case MEM_WO:
1522 if (readbuf != NULL)
1523 return -1;
1524 break;
1525
1526 case MEM_FLASH:
1527 /* We only support writing to flash during "load" for now. */
1528 if (writebuf != NULL)
1529 error (_("Writing to flash memory forbidden in this context"));
1530 break;
1531
1532 case MEM_NONE:
1533 return -1;
1534 }
1535
1536 if (!ptid_equal (inferior_ptid, null_ptid))
1537 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1538 else
1539 inf = NULL;
1540
1541 if (inf != NULL
1542 /* The dcache reads whole cache lines; that doesn't play well
1543 with reading from a trace buffer, because reading outside of
1544 the collected memory range fails. */
1545 && get_traceframe_number () == -1
1546 && (region->attrib.cache
1547 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1548 {
1549 if (readbuf != NULL)
1550 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1551 reg_len, 0);
1552 else
1553 /* FIXME drow/2006-08-09: If we're going to preserve const
1554 correctness dcache_xfer_memory should take readbuf and
1555 writebuf. */
1556 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1557 (void *) writebuf,
1558 reg_len, 1);
1559 if (res <= 0)
1560 return -1;
1561 else
1562 return res;
1563 }
1564
1565 /* If none of those methods found the memory we wanted, fall back
1566 to a target partial transfer. Normally a single call to
1567 to_xfer_partial is enough; if it doesn't recognize an object
1568 it will call the to_xfer_partial of the next target down.
1569 But for memory this won't do. Memory is the only target
1570 object which can be read from more than one valid target.
1571 A core file, for instance, could have some of memory but
1572 delegate other bits to the target below it. So, we must
1573 manually try all targets. */
1574
1575 do
1576 {
1577 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1578 readbuf, writebuf, memaddr, reg_len);
1579 if (res > 0)
1580 break;
1581
1582 /* We want to continue past core files to executables, but not
1583 past a running target's memory. */
1584 if (ops->to_has_all_memory (ops))
1585 break;
1586
1587 ops = ops->beneath;
1588 }
1589 while (ops != NULL);
1590
1591 /* Make sure the cache gets updated no matter what, if we are writing
1592 to the stack: even if this write is not tagged as a stack write, we
1593 still need to update the cache. */
1594
1595 if (res > 0
1596 && inf != NULL
1597 && writebuf != NULL
1598 && !region->attrib.cache
1599 && stack_cache_enabled_p
1600 && object != TARGET_OBJECT_STACK_MEMORY)
1601 {
1602 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1603 }
1604
1605 /* If we still haven't got anything, return the last error. We
1606 give up. */
1607 return res;
1608 }
1609
1610 /* Perform a partial memory transfer. For docs see target.h,
1611 to_xfer_partial. */
1612
1613 static LONGEST
1614 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1615 void *readbuf, const void *writebuf, ULONGEST memaddr,
1616 LONGEST len)
1617 {
1618 int res;
1619
1620 /* Zero length requests are ok and require no work. */
1621 if (len == 0)
1622 return 0;
1623
1624 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1625 breakpoint insns, thus hiding out from higher layers whether
1626 there are software breakpoints inserted in the code stream. */
1627 if (readbuf != NULL)
1628 {
1629 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1630
1631 if (res > 0 && !show_memory_breakpoints)
1632 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1633 }
1634 else
1635 {
1636 void *buf;
1637 struct cleanup *old_chain;
1638
1639 buf = xmalloc (len);
1640 old_chain = make_cleanup (xfree, buf);
1641 memcpy (buf, writebuf, len);
1642
1643 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1644 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1645
1646 do_cleanups (old_chain);
1647 }
1648
1649 return res;
1650 }
1651
1652 static void
1653 restore_show_memory_breakpoints (void *arg)
1654 {
1655 show_memory_breakpoints = (uintptr_t) arg;
1656 }
1657
1658 struct cleanup *
1659 make_show_memory_breakpoints_cleanup (int show)
1660 {
1661 int current = show_memory_breakpoints;
1662
1663 show_memory_breakpoints = show;
1664 return make_cleanup (restore_show_memory_breakpoints,
1665 (void *) (uintptr_t) current);
1666 }
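
The function above follows GDB's usual save/modify/restore cleanup idiom; a hedged usage sketch (ADDR and GDBARCH are assumed to be in scope):

struct cleanup *old_chain;
gdb_byte buf[4];

/* Temporarily read raw memory contents, i.e. with any software
   breakpoint instructions left visible instead of shadowed.  */
old_chain = make_show_memory_breakpoints_cleanup (1);
if (target_read_memory (addr, buf, sizeof buf) != 0)
  warning (_("cannot read memory at %s"), paddress (gdbarch, addr));
do_cleanups (old_chain);   /* restore the previous setting */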
1667
1668 /* For docs see target.h, to_xfer_partial. */
1669
1670 static LONGEST
1671 target_xfer_partial (struct target_ops *ops,
1672 enum target_object object, const char *annex,
1673 void *readbuf, const void *writebuf,
1674 ULONGEST offset, LONGEST len)
1675 {
1676 LONGEST retval;
1677
1678 gdb_assert (ops->to_xfer_partial != NULL);
1679
1680 if (writebuf && !may_write_memory)
1681 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1682 core_addr_to_string_nz (offset), plongest (len));
1683
1684 /* If this is a memory transfer, let the memory-specific code
1685 have a look at it instead. Memory transfers are more
1686 complicated. */
1687 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1688 retval = memory_xfer_partial (ops, object, readbuf,
1689 writebuf, offset, len);
1690 else
1691 {
1692 enum target_object raw_object = object;
1693
1694 /* If this is a raw memory transfer, request the normal
1695 memory object from other layers. */
1696 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1697 raw_object = TARGET_OBJECT_MEMORY;
1698
1699 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1700 writebuf, offset, len);
1701 }
1702
1703 if (targetdebug)
1704 {
1705 const unsigned char *myaddr = NULL;
1706
1707 fprintf_unfiltered (gdb_stdlog,
1708 "%s:target_xfer_partial "
1709 "(%d, %s, %s, %s, %s, %s) = %s",
1710 ops->to_shortname,
1711 (int) object,
1712 (annex ? annex : "(null)"),
1713 host_address_to_string (readbuf),
1714 host_address_to_string (writebuf),
1715 core_addr_to_string_nz (offset),
1716 plongest (len), plongest (retval));
1717
1718 if (readbuf)
1719 myaddr = readbuf;
1720 if (writebuf)
1721 myaddr = writebuf;
1722 if (retval > 0 && myaddr != NULL)
1723 {
1724 int i;
1725
1726 fputs_unfiltered (", bytes =", gdb_stdlog);
1727 for (i = 0; i < retval; i++)
1728 {
1729 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1730 {
1731 if (targetdebug < 2 && i > 0)
1732 {
1733 fprintf_unfiltered (gdb_stdlog, " ...");
1734 break;
1735 }
1736 fprintf_unfiltered (gdb_stdlog, "\n");
1737 }
1738
1739 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1740 }
1741 }
1742
1743 fputc_unfiltered ('\n', gdb_stdlog);
1744 }
1745 return retval;
1746 }
1747
1748 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1749 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1750 if any error occurs.
1751
1752 If an error occurs, no guarantee is made about the contents of the data at
1753 MYADDR. In particular, the caller should not depend upon partial reads
1754 filling the buffer with good data. There is no way for the caller to know
1755 how much good data might have been transferred anyway. Callers that can
1756 deal with partial reads should call target_read (which will retry until
1757 it makes no progress, and then return how much was transferred). */
1758
1759 int
1760 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1761 {
1762 /* Dispatch to the topmost target, not the flattened current_target.
1763 Memory accesses check target->to_has_(all_)memory, and the
1764 flattened target doesn't inherit those. */
1765 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1766 myaddr, memaddr, len) == len)
1767 return 0;
1768 else
1769 return EIO;
1770 }
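
For example, a caller that wants all-or-nothing semantics can use target_read_memory directly (partial-read-tolerant callers go through target_read instead); ADDR is an assumed inferior address:

gdb_byte word[4];
ULONGEST value;

if (target_read_memory (addr, word, sizeof word) != 0)
  error (_("Cannot read memory at %s"), paddress (target_gdbarch, addr));

value = extract_unsigned_integer (word, sizeof word,
                                  gdbarch_byte_order (target_gdbarch));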
1771
1772 /* Like target_read_memory, but specify explicitly that this is a read from
1773 the target's stack. This may trigger different cache behavior. */
1774
1775 int
1776 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1777 {
1778 /* Dispatch to the topmost target, not the flattened current_target.
1779 Memory accesses check target->to_has_(all_)memory, and the
1780 flattened target doesn't inherit those. */
1781
1782 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1783 myaddr, memaddr, len) == len)
1784 return 0;
1785 else
1786 return EIO;
1787 }
1788
1789 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1790 Returns either 0 for success or an errno value if any error occurs.
1791 If an error occurs, no guarantee is made about how much data got written.
1792 Callers that can deal with partial writes should call target_write. */
1793
1794 int
1795 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1796 {
1797 /* Dispatch to the topmost target, not the flattened current_target.
1798 Memory accesses check target->to_has_(all_)memory, and the
1799 flattened target doesn't inherit those. */
1800 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1801 myaddr, memaddr, len) == len)
1802 return 0;
1803 else
1804 return EIO;
1805 }
1806
1807 /* Write LEN bytes from MYADDR to target raw memory at address
1808 MEMADDR. Returns either 0 for success or an errno value if any
1809 error occurs. If an error occurs, no guarantee is made about how
1810 much data got written. Callers that can deal with partial writes
1811 should call target_write. */
1812
1813 int
1814 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1815 {
1816 /* Dispatch to the topmost target, not the flattened current_target.
1817 Memory accesses check target->to_has_(all_)memory, and the
1818 flattened target doesn't inherit those. */
1819 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1820 myaddr, memaddr, len) == len)
1821 return 0;
1822 else
1823 return EIO;
1824 }
1825
1826 /* Fetch the target's memory map. */
1827
1828 VEC(mem_region_s) *
1829 target_memory_map (void)
1830 {
1831 VEC(mem_region_s) *result;
1832 struct mem_region *last_one, *this_one;
1833 int ix;
1834 struct target_ops *t;
1835
1836 if (targetdebug)
1837 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1838
1839 for (t = current_target.beneath; t != NULL; t = t->beneath)
1840 if (t->to_memory_map != NULL)
1841 break;
1842
1843 if (t == NULL)
1844 return NULL;
1845
1846 result = t->to_memory_map (t);
1847 if (result == NULL)
1848 return NULL;
1849
1850 qsort (VEC_address (mem_region_s, result),
1851 VEC_length (mem_region_s, result),
1852 sizeof (struct mem_region), mem_region_cmp);
1853
1854 /* Check that regions do not overlap. Simultaneously assign
1855 a numbering for the "mem" commands to use to refer to
1856 each region. */
1857 last_one = NULL;
1858 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1859 {
1860 this_one->number = ix;
1861
1862 if (last_one && last_one->hi > this_one->lo)
1863 {
1864 warning (_("Overlapping regions in memory map: ignoring"));
1865 VEC_free (mem_region_s, result);
1866 return NULL;
1867 }
1868 last_one = this_one;
1869 }
1870
1871 return result;
1872 }
1873
1874 void
1875 target_flash_erase (ULONGEST address, LONGEST length)
1876 {
1877 struct target_ops *t;
1878
1879 for (t = current_target.beneath; t != NULL; t = t->beneath)
1880 if (t->to_flash_erase != NULL)
1881 {
1882 if (targetdebug)
1883 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1884 hex_string (address), phex (length, 0));
1885 t->to_flash_erase (t, address, length);
1886 return;
1887 }
1888
1889 tcomplain ();
1890 }
1891
1892 void
1893 target_flash_done (void)
1894 {
1895 struct target_ops *t;
1896
1897 for (t = current_target.beneath; t != NULL; t = t->beneath)
1898 if (t->to_flash_done != NULL)
1899 {
1900 if (targetdebug)
1901 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1902 t->to_flash_done (t);
1903 return;
1904 }
1905
1906 tcomplain ();
1907 }
1908
1909 static void
1910 show_trust_readonly (struct ui_file *file, int from_tty,
1911 struct cmd_list_element *c, const char *value)
1912 {
1913 fprintf_filtered (file,
1914 _("Mode for reading from readonly sections is %s.\n"),
1915 value);
1916 }
1917
1918 /* More generic transfers. */
1919
1920 static LONGEST
1921 default_xfer_partial (struct target_ops *ops, enum target_object object,
1922 const char *annex, gdb_byte *readbuf,
1923 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1924 {
1925 if (object == TARGET_OBJECT_MEMORY
1926 && ops->deprecated_xfer_memory != NULL)
1927 /* If available, fall back to the target's
1928 "deprecated_xfer_memory" method. */
1929 {
1930 int xfered = -1;
1931
1932 errno = 0;
1933 if (writebuf != NULL)
1934 {
1935 void *buffer = xmalloc (len);
1936 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1937
1938 memcpy (buffer, writebuf, len);
1939 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1940 1/*write*/, NULL, ops);
1941 do_cleanups (cleanup);
1942 }
1943 if (readbuf != NULL)
1944 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1945 0/*read*/, NULL, ops);
1946 if (xfered > 0)
1947 return xfered;
1948 else if (xfered == 0 && errno == 0)
1949 /* "deprecated_xfer_memory" uses 0, cross checked against
1950 ERRNO as one indication of an error. */
1951 return 0;
1952 else
1953 return -1;
1954 }
1955 else if (ops->beneath != NULL)
1956 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1957 readbuf, writebuf, offset, len);
1958 else
1959 return -1;
1960 }
1961
1962 /* The xfer_partial handler for the topmost target. Unlike the default,
1963 it does not need to handle memory specially; it just passes all
1964 requests down the stack. */
1965
1966 static LONGEST
1967 current_xfer_partial (struct target_ops *ops, enum target_object object,
1968 const char *annex, gdb_byte *readbuf,
1969 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1970 {
1971 if (ops->beneath != NULL)
1972 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1973 readbuf, writebuf, offset, len);
1974 else
1975 return -1;
1976 }
1977
1978 /* Target vector read/write partial wrapper functions. */
1979
1980 static LONGEST
1981 target_read_partial (struct target_ops *ops,
1982 enum target_object object,
1983 const char *annex, gdb_byte *buf,
1984 ULONGEST offset, LONGEST len)
1985 {
1986 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1987 }
1988
1989 static LONGEST
1990 target_write_partial (struct target_ops *ops,
1991 enum target_object object,
1992 const char *annex, const gdb_byte *buf,
1993 ULONGEST offset, LONGEST len)
1994 {
1995 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1996 }
1997
1998 /* Wrappers to perform the full transfer. */
1999
2000 /* For docs on target_read see target.h. */
2001
2002 LONGEST
2003 target_read (struct target_ops *ops,
2004 enum target_object object,
2005 const char *annex, gdb_byte *buf,
2006 ULONGEST offset, LONGEST len)
2007 {
2008 LONGEST xfered = 0;
2009
2010 while (xfered < len)
2011 {
2012 LONGEST xfer = target_read_partial (ops, object, annex,
2013 (gdb_byte *) buf + xfered,
2014 offset + xfered, len - xfered);
2015
2016 /* Call an observer, notifying them of the xfer progress? */
2017 if (xfer == 0)
2018 return xfered;
2019 if (xfer < 0)
2020 return -1;
2021 xfered += xfer;
2022 QUIT;
2023 }
2024 return len;
2025 }
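
/* Illustrative sketch, not part of the original target.c: target_read
   returns LEN only when the entire transfer succeeded, so a caller
   wanting all-or-nothing behaviour just compares the result against
   LEN -- exactly what target_read_stack and target_write_memory above
   do.  */

static int
example_read_block (struct target_ops *ops, ULONGEST offset,
                    gdb_byte *buf, LONGEST len)
{
  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, buf, offset, len) == len)
    return 0;

  return EIO;
}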
2026
2027 /* Assuming that the entire [begin, end) range of memory cannot be
2028 read, try to read whatever subrange is possible to read.
2029
2030 The function returns, in RESULT, either zero or one memory block.
2031 If there's a readable subrange at the beginning, it is completely
2032 read and returned. Any further readable subrange will not be read.
2033 Otherwise, if there's a readable subrange at the end, it will be
2034 completely read and returned. Any readable subranges before it
2035 (obviously not starting at the beginning) will be ignored. In
2036 other cases -- either no readable subrange, or readable subranges
2037 that are neither at the beginning nor at the end -- nothing is returned.
2038
2039 The purpose of this function is to handle a read across a boundary
2040 of accessible memory in a case when memory map is not available.
2041 The above restrictions are fine for this case, but will give
2042 incorrect results if the memory is 'patchy'. However, supporting
2043 'patchy' memory would require trying to read every single byte,
2044 and that seems an unacceptable solution. An explicit memory map is
2045 recommended for this case -- and read_memory_robust will then
2046 take care of reading multiple ranges. */
2047
2048 static void
2049 read_whatever_is_readable (struct target_ops *ops,
2050 ULONGEST begin, ULONGEST end,
2051 VEC(memory_read_result_s) **result)
2052 {
2053 gdb_byte *buf = xmalloc (end - begin);
2054 ULONGEST current_begin = begin;
2055 ULONGEST current_end = end;
2056 int forward;
2057 memory_read_result_s r;
2058
2059 /* If we previously failed to read 1 byte, nothing can be done here. */
2060 if (end - begin <= 1)
2061 {
2062 xfree (buf);
2063 return;
2064 }
2065
2066 /* Check that either the first or the last byte is readable, and give up
2067 if not. This heuristic is meant to permit reading accessible memory
2068 at the boundary of an accessible region. */
2069 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2070 buf, begin, 1) == 1)
2071 {
2072 forward = 1;
2073 ++current_begin;
2074 }
2075 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2076 buf + (end-begin) - 1, end - 1, 1) == 1)
2077 {
2078 forward = 0;
2079 --current_end;
2080 }
2081 else
2082 {
2083 xfree (buf);
2084 return;
2085 }
2086
2087 /* The loop invariant is that the range [current_begin, current_end) was
2088 previously found to be not readable as a whole.
2089
2090 Note the loop condition -- if the range has only 1 byte, we can't divide
2091 it any further, so there's no point in trying. */
2092 while (current_end - current_begin > 1)
2093 {
2094 ULONGEST first_half_begin, first_half_end;
2095 ULONGEST second_half_begin, second_half_end;
2096 LONGEST xfer;
2097 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2098
2099 if (forward)
2100 {
2101 first_half_begin = current_begin;
2102 first_half_end = middle;
2103 second_half_begin = middle;
2104 second_half_end = current_end;
2105 }
2106 else
2107 {
2108 first_half_begin = middle;
2109 first_half_end = current_end;
2110 second_half_begin = current_begin;
2111 second_half_end = middle;
2112 }
2113
2114 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2115 buf + (first_half_begin - begin),
2116 first_half_begin,
2117 first_half_end - first_half_begin);
2118
2119 if (xfer == first_half_end - first_half_begin)
2120 {
2121 /* This half reads fine, so the error must be in the
2122 other half. */
2123 current_begin = second_half_begin;
2124 current_end = second_half_end;
2125 }
2126 else
2127 {
2128 /* This half is not readable. Because we've already read one byte, we
2129 know some part of this half is actually readable. Go to the next
2130 iteration to divide the range again and try to read.
2131
2132 We don't handle the other half, because this function only tries
2133 to read a single readable subrange. */
2134 current_begin = first_half_begin;
2135 current_end = first_half_end;
2136 }
2137 }
2138
2139 if (forward)
2140 {
2141 /* The [begin, current_begin) range has been read. */
2142 r.begin = begin;
2143 r.end = current_begin;
2144 r.data = buf;
2145 }
2146 else
2147 {
2148 /* The [current_end, end) range has been read. */
2149 LONGEST rlen = end - current_end;
2150
2151 r.data = xmalloc (rlen);
2152 memcpy (r.data, buf + current_end - begin, rlen);
2153 r.begin = current_end;
2154 r.end = end;
2155 xfree (buf);
2156 }
2157 VEC_safe_push (memory_read_result_s, (*result), &r);
2158 }
2159
2160 void
2161 free_memory_read_result_vector (void *x)
2162 {
2163 VEC(memory_read_result_s) *v = x;
2164 memory_read_result_s *current;
2165 int ix;
2166
2167 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2168 {
2169 xfree (current->data);
2170 }
2171 VEC_free (memory_read_result_s, v);
2172 }
2173
2174 VEC(memory_read_result_s) *
2175 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2176 {
2177 VEC(memory_read_result_s) *result = 0;
2178
2179 LONGEST xfered = 0;
2180 while (xfered < len)
2181 {
2182 struct mem_region *region = lookup_mem_region (offset + xfered);
2183 LONGEST rlen;
2184
2185 /* If there is no explicit region, a fake one should be created. */
2186 gdb_assert (region);
2187
2188 if (region->hi == 0)
2189 rlen = len - xfered;
2190 else
2191 rlen = region->hi - offset;
2192
2193 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2194 {
2195 /* Cannot read this region. Note that we can end up here only
2196 if the region is explicitly marked inaccessible, or
2197 'inaccessible-by-default' is in effect. */
2198 xfered += rlen;
2199 }
2200 else
2201 {
2202 LONGEST to_read = min (len - xfered, rlen);
2203 gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);
2204
2205 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2206 (gdb_byte *) buffer,
2207 offset + xfered, to_read);
2208 /* Call an observer, notifying them of the xfer progress? */
2209 if (xfer <= 0)
2210 {
2211 /* Got an error reading the full chunk. See if we can read
2212 some subrange of it. */
2213 xfree (buffer);
2214 read_whatever_is_readable (ops, offset + xfered,
2215 offset + xfered + to_read, &result);
2216 xfered += to_read;
2217 }
2218 else
2219 {
2220 struct memory_read_result r;
2221 r.data = buffer;
2222 r.begin = offset + xfered;
2223 r.end = r.begin + xfer;
2224 VEC_safe_push (memory_read_result_s, result, &r);
2225 xfered += xfer;
2226 }
2227 QUIT;
2228 }
2229 }
2230 return result;
2231 }
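
/* Illustrative sketch, not part of the original target.c: consuming
   the result of read_memory_robust and releasing it through
   free_memory_read_result_vector.  The printed format is made up for
   the example.  */

static void
example_dump_readable_ranges (ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *ranges
    = read_memory_robust (current_target.beneath, offset, len);
  struct cleanup *old_chain
    = make_cleanup (free_memory_read_result_vector, ranges);
  memory_read_result_s *r;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, ranges, ix, r); ix++)
    printf_unfiltered ("readable: %s..%s (%s bytes)\n",
                       hex_string (r->begin), hex_string (r->end),
                       pulongest (r->end - r->begin));

  do_cleanups (old_chain);
}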
2232
2233
2234 /* An alternative to target_write with progress callbacks. */
2235
2236 LONGEST
2237 target_write_with_progress (struct target_ops *ops,
2238 enum target_object object,
2239 const char *annex, const gdb_byte *buf,
2240 ULONGEST offset, LONGEST len,
2241 void (*progress) (ULONGEST, void *), void *baton)
2242 {
2243 LONGEST xfered = 0;
2244
2245 /* Give the progress callback a chance to set up. */
2246 if (progress)
2247 (*progress) (0, baton);
2248
2249 while (xfered < len)
2250 {
2251 LONGEST xfer = target_write_partial (ops, object, annex,
2252 (gdb_byte *) buf + xfered,
2253 offset + xfered, len - xfered);
2254
2255 if (xfer == 0)
2256 return xfered;
2257 if (xfer < 0)
2258 return -1;
2259
2260 if (progress)
2261 (*progress) (xfer, baton);
2262
2263 xfered += xfer;
2264 QUIT;
2265 }
2266 return len;
2267 }
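
/* Illustrative sketch, not part of the original target.c: a minimal
   PROGRESS callback for target_write_with_progress.  As noted above,
   the first call is made with 0 before any data is written; each later
   call reports the size of one completed chunk.  BATON is assumed to
   point at a running total kept by the caller.  */

static void
example_write_progress (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
  printf_unfiltered ("write progress: %s bytes so far\n",
                     pulongest (*total));
}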
2268
2269 /* For docs on target_write see target.h. */
2270
2271 LONGEST
2272 target_write (struct target_ops *ops,
2273 enum target_object object,
2274 const char *annex, const gdb_byte *buf,
2275 ULONGEST offset, LONGEST len)
2276 {
2277 return target_write_with_progress (ops, object, annex, buf, offset, len,
2278 NULL, NULL);
2279 }
2280
2281 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2282 the size of the transferred data. PADDING additional bytes are
2283 available in *BUF_P. This is a helper function for
2284 target_read_alloc; see the declaration of that function for more
2285 information. */
2286
2287 static LONGEST
2288 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2289 const char *annex, gdb_byte **buf_p, int padding)
2290 {
2291 size_t buf_alloc, buf_pos;
2292 gdb_byte *buf;
2293 LONGEST n;
2294
2295 /* This function does not have a length parameter; it reads the
2296 entire OBJECT. Also, it doesn't support objects fetched partly
2297 from one target and partly from another (in a different stratum,
2298 e.g. a core file and an executable). Both reasons make it
2299 unsuitable for reading memory. */
2300 gdb_assert (object != TARGET_OBJECT_MEMORY);
2301
2302 /* Start by reading up to 4K at a time. The target will throttle
2303 this number down if necessary. */
2304 buf_alloc = 4096;
2305 buf = xmalloc (buf_alloc);
2306 buf_pos = 0;
2307 while (1)
2308 {
2309 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2310 buf_pos, buf_alloc - buf_pos - padding);
2311 if (n < 0)
2312 {
2313 /* An error occurred. */
2314 xfree (buf);
2315 return -1;
2316 }
2317 else if (n == 0)
2318 {
2319 /* Read all there was. */
2320 if (buf_pos == 0)
2321 xfree (buf);
2322 else
2323 *buf_p = buf;
2324 return buf_pos;
2325 }
2326
2327 buf_pos += n;
2328
2329 /* If the buffer is filling up, expand it. */
2330 if (buf_alloc < buf_pos * 2)
2331 {
2332 buf_alloc *= 2;
2333 buf = xrealloc (buf, buf_alloc);
2334 }
2335
2336 QUIT;
2337 }
2338 }
2339
2340 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2341 the size of the transferred data. See the declaration of this
2342 function in "target.h" for more information about the return value. */
2343
2344 LONGEST
2345 target_read_alloc (struct target_ops *ops, enum target_object object,
2346 const char *annex, gdb_byte **buf_p)
2347 {
2348 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2349 }
2350
2351 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2352 returned as a string, allocated using xmalloc. If an error occurs
2353 or the transfer is unsupported, NULL is returned. Empty objects
2354 are returned as allocated but empty strings. A warning is issued
2355 if the result contains any embedded NUL bytes. */
2356
2357 char *
2358 target_read_stralloc (struct target_ops *ops, enum target_object object,
2359 const char *annex)
2360 {
2361 gdb_byte *buffer;
2362 LONGEST i, transferred;
2363
2364 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2365
2366 if (transferred < 0)
2367 return NULL;
2368
2369 if (transferred == 0)
2370 return xstrdup ("");
2371
2372 buffer[transferred] = 0;
2373
2374 /* Check for embedded NUL bytes; but allow trailing NULs. */
2375 for (i = strlen (buffer); i < transferred; i++)
2376 if (buffer[i] != 0)
2377 {
2378 warning (_("target object %d, annex %s, "
2379 "contained unexpected null characters"),
2380 (int) object, annex ? annex : "(none)");
2381 break;
2382 }
2383
2384 return (char *) buffer;
2385 }
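
/* Illustrative sketch, not part of the original target.c: the usual
   calling pattern for target_read_stralloc -- check for NULL (error or
   unsupported transfer), use the string, then xfree it.  The
   "processes" annex is only an example of an OSDATA annex; see
   target_get_osdata below for a real caller.  */

static void
example_show_osdata_processes (void)
{
  char *text = target_read_stralloc (current_target.beneath,
                                     TARGET_OBJECT_OSDATA, "processes");

  if (text == NULL)
    return;

  puts_filtered (text);
  xfree (text);
}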
2386
2387 /* Memory transfer methods. */
2388
2389 void
2390 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2391 LONGEST len)
2392 {
2393 /* This method is used to read from an alternate, non-current
2394 target. This read must bypass the overlay support (as symbols
2395 don't match this target), and GDB's internal cache (wrong cache
2396 for this target). */
2397 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2398 != len)
2399 memory_error (EIO, addr);
2400 }
2401
2402 ULONGEST
2403 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2404 int len, enum bfd_endian byte_order)
2405 {
2406 gdb_byte buf[sizeof (ULONGEST)];
2407
2408 gdb_assert (len <= sizeof (buf));
2409 get_target_memory (ops, addr, buf, len);
2410 return extract_unsigned_integer (buf, len, byte_order);
2411 }
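
/* Illustrative sketch, not part of the original target.c: reading one
   32-bit unsigned value from an alternate target.  A failed read is
   reported through memory_error by get_target_memory; using the
   current architecture's byte order is just an example.  */

static ULONGEST
example_peek_u32 (struct target_ops *ops, CORE_ADDR addr)
{
  return get_target_memory_unsigned (ops, addr, 4,
                                     gdbarch_byte_order (target_gdbarch));
}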
2412
2413 int
2414 target_insert_breakpoint (struct gdbarch *gdbarch,
2415 struct bp_target_info *bp_tgt)
2416 {
2417 if (!may_insert_breakpoints)
2418 {
2419 warning (_("May not insert breakpoints"));
2420 return 1;
2421 }
2422
2423 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2424 }
2425
2426 int
2427 target_remove_breakpoint (struct gdbarch *gdbarch,
2428 struct bp_target_info *bp_tgt)
2429 {
2430 /* This is kind of a weird case to handle, but the permission might
2431 have been changed after breakpoints were inserted - in which case
2432 we should just take the user literally and assume that any
2433 breakpoints should be left in place. */
2434 if (!may_insert_breakpoints)
2435 {
2436 warning (_("May not remove breakpoints"));
2437 return 1;
2438 }
2439
2440 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2441 }
2442
2443 static void
2444 target_info (char *args, int from_tty)
2445 {
2446 struct target_ops *t;
2447 int has_all_mem = 0;
2448
2449 if (symfile_objfile != NULL)
2450 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2451
2452 for (t = target_stack; t != NULL; t = t->beneath)
2453 {
2454 if (!(*t->to_has_memory) (t))
2455 continue;
2456
2457 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2458 continue;
2459 if (has_all_mem)
2460 printf_unfiltered (_("\tWhile running this, "
2461 "GDB does not access memory from...\n"));
2462 printf_unfiltered ("%s:\n", t->to_longname);
2463 (t->to_files_info) (t);
2464 has_all_mem = (*t->to_has_all_memory) (t);
2465 }
2466 }
2467
2468 /* This function is called before any new inferior is created, e.g.
2469 by running a program, attaching, or connecting to a target.
2470 It cleans up any state from previous invocations which might
2471 change between runs. This is a subset of what target_preopen
2472 resets (things which might change between targets). */
2473
2474 void
2475 target_pre_inferior (int from_tty)
2476 {
2477 /* Clear out solib state. Otherwise the solib state of the previous
2478 inferior might have survived and is entirely wrong for the new
2479 target. This has been observed on GNU/Linux using glibc 2.3. How
2480 to reproduce:
2481
2482 bash$ ./foo&
2483 [1] 4711
2484 bash$ ./foo&
2485 [2] 4712
2486 bash$ gdb ./foo
2487 [...]
2488 (gdb) attach 4711
2489 (gdb) detach
2490 (gdb) attach 4712
2491 Cannot access memory at address 0xdeadbeef
2492 */
2493
2494 /* In some OSs, the shared library list is the same/global/shared
2495 across inferiors. If code is shared between processes, so are
2496 memory regions and features. */
2497 if (!gdbarch_has_global_solist (target_gdbarch))
2498 {
2499 no_shared_libraries (NULL, from_tty);
2500
2501 invalidate_target_mem_regions ();
2502
2503 target_clear_description ();
2504 }
2505
2506 agent_capability_invalidate ();
2507 }
2508
2509 /* Callback for iterate_over_inferiors. Gets rid of the given
2510 inferior. */
2511
2512 static int
2513 dispose_inferior (struct inferior *inf, void *args)
2514 {
2515 struct thread_info *thread;
2516
2517 thread = any_thread_of_process (inf->pid);
2518 if (thread)
2519 {
2520 switch_to_thread (thread->ptid);
2521
2522 /* Core inferiors actually should be detached, not killed. */
2523 if (target_has_execution)
2524 target_kill ();
2525 else
2526 target_detach (NULL, 0);
2527 }
2528
2529 return 0;
2530 }
2531
2532 /* This is to be called by the open routine before it does
2533 anything. */
2534
2535 void
2536 target_preopen (int from_tty)
2537 {
2538 dont_repeat ();
2539
2540 if (have_inferiors ())
2541 {
2542 if (!from_tty
2543 || !have_live_inferiors ()
2544 || query (_("A program is being debugged already. Kill it? ")))
2545 iterate_over_inferiors (dispose_inferior, NULL);
2546 else
2547 error (_("Program not killed."));
2548 }
2549
2550 /* Calling target_kill may remove the target from the stack. But if
2551 it doesn't (which seems like a win for UDI), remove it now. */
2552 /* Leave the exec target, though. The user may be switching from a
2553 live process to a core of the same program. */
2554 pop_all_targets_above (file_stratum, 0);
2555
2556 target_pre_inferior (from_tty);
2557 }
2558
2559 /* Detach a target after doing deferred register stores. */
2560
2561 void
2562 target_detach (char *args, int from_tty)
2563 {
2564 struct target_ops* t;
2565
2566 if (gdbarch_has_global_breakpoints (target_gdbarch))
2567 /* Don't remove global breakpoints here. They're removed on
2568 disconnection from the target. */
2569 ;
2570 else
2571 /* If we're in breakpoints-always-inserted mode, have to remove
2572 them before detaching. */
2573 remove_breakpoints_pid (PIDGET (inferior_ptid));
2574
2575 prepare_for_detach ();
2576
2577 for (t = current_target.beneath; t != NULL; t = t->beneath)
2578 {
2579 if (t->to_detach != NULL)
2580 {
2581 t->to_detach (t, args, from_tty);
2582 if (targetdebug)
2583 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2584 args, from_tty);
2585 return;
2586 }
2587 }
2588
2589 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2590 }
2591
2592 void
2593 target_disconnect (char *args, int from_tty)
2594 {
2595 struct target_ops *t;
2596
2597 /* If we're in breakpoints-always-inserted mode or if breakpoints
2598 are global across processes, we have to remove them before
2599 disconnecting. */
2600 remove_breakpoints ();
2601
2602 for (t = current_target.beneath; t != NULL; t = t->beneath)
2603 if (t->to_disconnect != NULL)
2604 {
2605 if (targetdebug)
2606 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2607 args, from_tty);
2608 t->to_disconnect (t, args, from_tty);
2609 return;
2610 }
2611
2612 tcomplain ();
2613 }
2614
2615 ptid_t
2616 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2617 {
2618 struct target_ops *t;
2619
2620 for (t = current_target.beneath; t != NULL; t = t->beneath)
2621 {
2622 if (t->to_wait != NULL)
2623 {
2624 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2625
2626 if (targetdebug)
2627 {
2628 char *status_string;
2629
2630 status_string = target_waitstatus_to_string (status);
2631 fprintf_unfiltered (gdb_stdlog,
2632 "target_wait (%d, status) = %d, %s\n",
2633 PIDGET (ptid), PIDGET (retval),
2634 status_string);
2635 xfree (status_string);
2636 }
2637
2638 return retval;
2639 }
2640 }
2641
2642 noprocess ();
2643 }
2644
2645 char *
2646 target_pid_to_str (ptid_t ptid)
2647 {
2648 struct target_ops *t;
2649
2650 for (t = current_target.beneath; t != NULL; t = t->beneath)
2651 {
2652 if (t->to_pid_to_str != NULL)
2653 return (*t->to_pid_to_str) (t, ptid);
2654 }
2655
2656 return normal_pid_to_str (ptid);
2657 }
2658
2659 char *
2660 target_thread_name (struct thread_info *info)
2661 {
2662 struct target_ops *t;
2663
2664 for (t = current_target.beneath; t != NULL; t = t->beneath)
2665 {
2666 if (t->to_thread_name != NULL)
2667 return (*t->to_thread_name) (info);
2668 }
2669
2670 return NULL;
2671 }
2672
2673 void
2674 target_resume (ptid_t ptid, int step, enum target_signal signal)
2675 {
2676 struct target_ops *t;
2677
2678 target_dcache_invalidate ();
2679
2680 for (t = current_target.beneath; t != NULL; t = t->beneath)
2681 {
2682 if (t->to_resume != NULL)
2683 {
2684 t->to_resume (t, ptid, step, signal);
2685 if (targetdebug)
2686 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2687 PIDGET (ptid),
2688 step ? "step" : "continue",
2689 target_signal_to_name (signal));
2690
2691 registers_changed_ptid (ptid);
2692 set_executing (ptid, 1);
2693 set_running (ptid, 1);
2694 clear_inline_frame_state (ptid);
2695 return;
2696 }
2697 }
2698
2699 noprocess ();
2700 }
2701
2702 void
2703 target_pass_signals (int numsigs, unsigned char *pass_signals)
2704 {
2705 struct target_ops *t;
2706
2707 for (t = current_target.beneath; t != NULL; t = t->beneath)
2708 {
2709 if (t->to_pass_signals != NULL)
2710 {
2711 if (targetdebug)
2712 {
2713 int i;
2714
2715 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2716 numsigs);
2717
2718 for (i = 0; i < numsigs; i++)
2719 if (pass_signals[i])
2720 fprintf_unfiltered (gdb_stdlog, " %s",
2721 target_signal_to_name (i));
2722
2723 fprintf_unfiltered (gdb_stdlog, " })\n");
2724 }
2725
2726 (*t->to_pass_signals) (numsigs, pass_signals);
2727 return;
2728 }
2729 }
2730 }
2731
2732 void
2733 target_program_signals (int numsigs, unsigned char *program_signals)
2734 {
2735 struct target_ops *t;
2736
2737 for (t = current_target.beneath; t != NULL; t = t->beneath)
2738 {
2739 if (t->to_program_signals != NULL)
2740 {
2741 if (targetdebug)
2742 {
2743 int i;
2744
2745 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2746 numsigs);
2747
2748 for (i = 0; i < numsigs; i++)
2749 if (program_signals[i])
2750 fprintf_unfiltered (gdb_stdlog, " %s",
2751 target_signal_to_name (i));
2752
2753 fprintf_unfiltered (gdb_stdlog, " })\n");
2754 }
2755
2756 (*t->to_program_signals) (numsigs, program_signals);
2757 return;
2758 }
2759 }
2760 }
2761
2762 /* Look through the list of possible targets for a target that can
2763 follow forks. */
2764
2765 int
2766 target_follow_fork (int follow_child)
2767 {
2768 struct target_ops *t;
2769
2770 for (t = current_target.beneath; t != NULL; t = t->beneath)
2771 {
2772 if (t->to_follow_fork != NULL)
2773 {
2774 int retval = t->to_follow_fork (t, follow_child);
2775
2776 if (targetdebug)
2777 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2778 follow_child, retval);
2779 return retval;
2780 }
2781 }
2782
2783 /* Some target returned a fork event, but did not know how to follow it. */
2784 internal_error (__FILE__, __LINE__,
2785 _("could not find a target to follow fork"));
2786 }
2787
2788 void
2789 target_mourn_inferior (void)
2790 {
2791 struct target_ops *t;
2792
2793 for (t = current_target.beneath; t != NULL; t = t->beneath)
2794 {
2795 if (t->to_mourn_inferior != NULL)
2796 {
2797 t->to_mourn_inferior (t);
2798 if (targetdebug)
2799 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2800
2801 /* We no longer need to keep handles on any of the object files.
2802 Make sure to release them to avoid unnecessarily locking any
2803 of them while we're not actually debugging. */
2804 bfd_cache_close_all ();
2805
2806 return;
2807 }
2808 }
2809
2810 internal_error (__FILE__, __LINE__,
2811 _("could not find a target to follow mourn inferior"));
2812 }
2813
2814 /* Look for a target which can describe architectural features, starting
2815 from TARGET. If we find one, return its description. */
2816
2817 const struct target_desc *
2818 target_read_description (struct target_ops *target)
2819 {
2820 struct target_ops *t;
2821
2822 for (t = target; t != NULL; t = t->beneath)
2823 if (t->to_read_description != NULL)
2824 {
2825 const struct target_desc *tdesc;
2826
2827 tdesc = t->to_read_description (t);
2828 if (tdesc)
2829 return tdesc;
2830 }
2831
2832 return NULL;
2833 }
2834
2835 /* The default implementation of to_search_memory.
2836 This implements a basic search of memory, reading target memory and
2837 performing the search here (as opposed to performing the search on the
2838 target side with, for example, gdbserver). */
2839
2840 int
2841 simple_search_memory (struct target_ops *ops,
2842 CORE_ADDR start_addr, ULONGEST search_space_len,
2843 const gdb_byte *pattern, ULONGEST pattern_len,
2844 CORE_ADDR *found_addrp)
2845 {
2846 /* NOTE: also defined in find.c testcase. */
2847 #define SEARCH_CHUNK_SIZE 16000
2848 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2849 /* Buffer to hold memory contents for searching. */
2850 gdb_byte *search_buf;
2851 unsigned search_buf_size;
2852 struct cleanup *old_cleanups;
2853
2854 search_buf_size = chunk_size + pattern_len - 1;
2855
2856 /* No point in trying to allocate a buffer larger than the search space. */
2857 if (search_space_len < search_buf_size)
2858 search_buf_size = search_space_len;
2859
2860 search_buf = malloc (search_buf_size);
2861 if (search_buf == NULL)
2862 error (_("Unable to allocate memory to perform the search."));
2863 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2864
2865 /* Prime the search buffer. */
2866
2867 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2868 search_buf, start_addr, search_buf_size) != search_buf_size)
2869 {
2870 warning (_("Unable to access target memory at %s, halting search."),
2871 hex_string (start_addr));
2872 do_cleanups (old_cleanups);
2873 return -1;
2874 }
2875
2876 /* Perform the search.
2877
2878 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2879 When we've scanned N bytes we copy the trailing bytes to the start and
2880 read in another N bytes. */
2881
2882 while (search_space_len >= pattern_len)
2883 {
2884 gdb_byte *found_ptr;
2885 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2886
2887 found_ptr = memmem (search_buf, nr_search_bytes,
2888 pattern, pattern_len);
2889
2890 if (found_ptr != NULL)
2891 {
2892 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2893
2894 *found_addrp = found_addr;
2895 do_cleanups (old_cleanups);
2896 return 1;
2897 }
2898
2899 /* Not found in this chunk, skip to next chunk. */
2900
2901 /* Don't let search_space_len wrap here, it's unsigned. */
2902 if (search_space_len >= chunk_size)
2903 search_space_len -= chunk_size;
2904 else
2905 search_space_len = 0;
2906
2907 if (search_space_len >= pattern_len)
2908 {
2909 unsigned keep_len = search_buf_size - chunk_size;
2910 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2911 int nr_to_read;
2912
2913 /* Copy the trailing part of the previous iteration to the front
2914 of the buffer for the next iteration. */
2915 gdb_assert (keep_len == pattern_len - 1);
2916 memcpy (search_buf, search_buf + chunk_size, keep_len);
2917
2918 nr_to_read = min (search_space_len - keep_len, chunk_size);
2919
2920 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2921 search_buf + keep_len, read_addr,
2922 nr_to_read) != nr_to_read)
2923 {
2924 warning (_("Unable to access target "
2925 "memory at %s, halting search."),
2926 hex_string (read_addr));
2927 do_cleanups (old_cleanups);
2928 return -1;
2929 }
2930
2931 start_addr += chunk_size;
2932 }
2933 }
2934
2935 /* Not found. */
2936
2937 do_cleanups (old_cleanups);
2938 return 0;
2939 }
2940
2941 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2942 sequence of bytes in PATTERN with length PATTERN_LEN.
2943
2944 The result is 1 if found, 0 if not found, and -1 if there was an error
2945 requiring halting of the search (e.g. memory read error).
2946 If the pattern is found the address is recorded in FOUND_ADDRP. */
2947
2948 int
2949 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2950 const gdb_byte *pattern, ULONGEST pattern_len,
2951 CORE_ADDR *found_addrp)
2952 {
2953 struct target_ops *t;
2954 int found;
2955
2956 /* We don't use INHERIT to set current_target.to_search_memory,
2957 so we have to scan the target stack and handle targetdebug
2958 ourselves. */
2959
2960 if (targetdebug)
2961 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2962 hex_string (start_addr));
2963
2964 for (t = current_target.beneath; t != NULL; t = t->beneath)
2965 if (t->to_search_memory != NULL)
2966 break;
2967
2968 if (t != NULL)
2969 {
2970 found = t->to_search_memory (t, start_addr, search_space_len,
2971 pattern, pattern_len, found_addrp);
2972 }
2973 else
2974 {
2975 /* If a special version of to_search_memory isn't available, use the
2976 simple version. */
2977 found = simple_search_memory (current_target.beneath,
2978 start_addr, search_space_len,
2979 pattern, pattern_len, found_addrp);
2980 }
2981
2982 if (targetdebug)
2983 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2984
2985 return found;
2986 }
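
/* Illustrative sketch, not part of the original target.c: searching a
   range of target memory for a fixed byte pattern.  The pattern and
   the reporting are made up for the example.  */

static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found;

  switch (target_search_memory (start, space_len, magic, sizeof (magic),
                                &found))
    {
    case 1:
      printf_unfiltered ("pattern found at %s\n", hex_string (found));
      break;
    case 0:
      printf_unfiltered ("pattern not found\n");
      break;
    default:
      /* -1: a memory read error halted the search.  */
      break;
    }
}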
2987
2988 /* Look through the currently pushed targets. If none of them will
2989 be able to restart the currently running process, issue an error
2990 message. */
2991
2992 void
2993 target_require_runnable (void)
2994 {
2995 struct target_ops *t;
2996
2997 for (t = target_stack; t != NULL; t = t->beneath)
2998 {
2999 /* If this target knows how to create a new program, then
3000 assume we will still be able to after killing the current
3001 one. Either killing and mourning will not pop T, or else
3002 find_default_run_target will find it again. */
3003 if (t->to_create_inferior != NULL)
3004 return;
3005
3006 /* Do not worry about thread_stratum targets that cannot
3007 create inferiors. Assume they will be pushed again if
3008 necessary, and continue to the process_stratum. */
3009 if (t->to_stratum == thread_stratum
3010 || t->to_stratum == arch_stratum)
3011 continue;
3012
3013 error (_("The \"%s\" target does not support \"run\". "
3014 "Try \"help target\" or \"continue\"."),
3015 t->to_shortname);
3016 }
3017
3018 /* This function is only called if the target is running. In that
3019 case there should have been a process_stratum target and it
3020 should either know how to create inferiors, or not... */
3021 internal_error (__FILE__, __LINE__, _("No targets found"));
3022 }
3023
3024 /* Look through the list of possible targets for a target that can
3025 execute a run or attach command without any other data. This is
3026 used to locate the default process stratum.
3027
3028 If DO_MESG is not NULL, the result is always valid (error() is
3029 called for errors); else, return NULL on error. */
3030
3031 static struct target_ops *
3032 find_default_run_target (char *do_mesg)
3033 {
3034 struct target_ops **t;
3035 struct target_ops *runable = NULL;
3036 int count;
3037
3038 count = 0;
3039
3040 for (t = target_structs; t < target_structs + target_struct_size;
3041 ++t)
3042 {
3043 if ((*t)->to_can_run && target_can_run (*t))
3044 {
3045 runable = *t;
3046 ++count;
3047 }
3048 }
3049
3050 if (count != 1)
3051 {
3052 if (do_mesg)
3053 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3054 else
3055 return NULL;
3056 }
3057
3058 return runable;
3059 }
3060
3061 void
3062 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3063 {
3064 struct target_ops *t;
3065
3066 t = find_default_run_target ("attach");
3067 (t->to_attach) (t, args, from_tty);
3068 return;
3069 }
3070
3071 void
3072 find_default_create_inferior (struct target_ops *ops,
3073 char *exec_file, char *allargs, char **env,
3074 int from_tty)
3075 {
3076 struct target_ops *t;
3077
3078 t = find_default_run_target ("run");
3079 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3080 return;
3081 }
3082
3083 static int
3084 find_default_can_async_p (void)
3085 {
3086 struct target_ops *t;
3087
3088 /* This may be called before the target is pushed on the stack;
3089 look for the default process stratum. If there's none, gdb isn't
3090 configured with a native debugger, and target remote isn't
3091 connected yet. */
3092 t = find_default_run_target (NULL);
3093 if (t && t->to_can_async_p)
3094 return (t->to_can_async_p) ();
3095 return 0;
3096 }
3097
3098 static int
3099 find_default_is_async_p (void)
3100 {
3101 struct target_ops *t;
3102
3103 /* This may be called before the target is pushed on the stack;
3104 look for the default process stratum. If there's none, gdb isn't
3105 configured with a native debugger, and target remote isn't
3106 connected yet. */
3107 t = find_default_run_target (NULL);
3108 if (t && t->to_is_async_p)
3109 return (t->to_is_async_p) ();
3110 return 0;
3111 }
3112
3113 static int
3114 find_default_supports_non_stop (void)
3115 {
3116 struct target_ops *t;
3117
3118 t = find_default_run_target (NULL);
3119 if (t && t->to_supports_non_stop)
3120 return (t->to_supports_non_stop) ();
3121 return 0;
3122 }
3123
3124 int
3125 target_supports_non_stop (void)
3126 {
3127 struct target_ops *t;
3128
3129 for (t = &current_target; t != NULL; t = t->beneath)
3130 if (t->to_supports_non_stop)
3131 return t->to_supports_non_stop ();
3132
3133 return 0;
3134 }
3135
3136 /* Implement the "info proc" command. */
3137
3138 void
3139 target_info_proc (char *args, enum info_proc_what what)
3140 {
3141 struct target_ops *t;
3142
3143 /* If we're already connected to something that can get us OS
3144 related data, use it. Otherwise, try using the native
3145 target. */
3146 if (current_target.to_stratum >= process_stratum)
3147 t = current_target.beneath;
3148 else
3149 t = find_default_run_target (NULL);
3150
3151 for (; t != NULL; t = t->beneath)
3152 {
3153 if (t->to_info_proc != NULL)
3154 {
3155 t->to_info_proc (t, args, what);
3156
3157 if (targetdebug)
3158 fprintf_unfiltered (gdb_stdlog,
3159 "target_info_proc (\"%s\", %d)\n", args, what);
3160
3161 return;
3162 }
3163 }
3164
3165 error (_("Not supported on this target."));
3166 }
3167
3168 static int
3169 find_default_supports_disable_randomization (void)
3170 {
3171 struct target_ops *t;
3172
3173 t = find_default_run_target (NULL);
3174 if (t && t->to_supports_disable_randomization)
3175 return (t->to_supports_disable_randomization) ();
3176 return 0;
3177 }
3178
3179 int
3180 target_supports_disable_randomization (void)
3181 {
3182 struct target_ops *t;
3183
3184 for (t = &current_target; t != NULL; t = t->beneath)
3185 if (t->to_supports_disable_randomization)
3186 return t->to_supports_disable_randomization ();
3187
3188 return 0;
3189 }
3190
3191 char *
3192 target_get_osdata (const char *type)
3193 {
3194 struct target_ops *t;
3195
3196 /* If we're already connected to something that can get us OS
3197 related data, use it. Otherwise, try using the native
3198 target. */
3199 if (current_target.to_stratum >= process_stratum)
3200 t = current_target.beneath;
3201 else
3202 t = find_default_run_target ("get OS data");
3203
3204 if (!t)
3205 return NULL;
3206
3207 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3208 }
3209
3210 /* Determine the current address space of thread PTID. */
3211
3212 struct address_space *
3213 target_thread_address_space (ptid_t ptid)
3214 {
3215 struct address_space *aspace;
3216 struct inferior *inf;
3217 struct target_ops *t;
3218
3219 for (t = current_target.beneath; t != NULL; t = t->beneath)
3220 {
3221 if (t->to_thread_address_space != NULL)
3222 {
3223 aspace = t->to_thread_address_space (t, ptid);
3224 gdb_assert (aspace);
3225
3226 if (targetdebug)
3227 fprintf_unfiltered (gdb_stdlog,
3228 "target_thread_address_space (%s) = %d\n",
3229 target_pid_to_str (ptid),
3230 address_space_num (aspace));
3231 return aspace;
3232 }
3233 }
3234
3235 /* Fall-back to the "main" address space of the inferior. */
3236 inf = find_inferior_pid (ptid_get_pid (ptid));
3237
3238 if (inf == NULL || inf->aspace == NULL)
3239 internal_error (__FILE__, __LINE__,
3240 _("Can't determine the current "
3241 "address space of thread %s\n"),
3242 target_pid_to_str (ptid));
3243
3244 return inf->aspace;
3245 }
3246
3247
3248 /* Target file operations. */
3249
3250 static struct target_ops *
3251 default_fileio_target (void)
3252 {
3253 /* If we're already connected to something that can perform
3254 file I/O, use it. Otherwise, try using the native target. */
3255 if (current_target.to_stratum >= process_stratum)
3256 return current_target.beneath;
3257 else
3258 return find_default_run_target ("file I/O");
3259 }
3260
3261 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3262 target file descriptor, or -1 if an error occurs (and set
3263 *TARGET_ERRNO). */
3264 int
3265 target_fileio_open (const char *filename, int flags, int mode,
3266 int *target_errno)
3267 {
3268 struct target_ops *t;
3269
3270 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3271 {
3272 if (t->to_fileio_open != NULL)
3273 {
3274 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3275
3276 if (targetdebug)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3279 filename, flags, mode,
3280 fd, fd != -1 ? 0 : *target_errno);
3281 return fd;
3282 }
3283 }
3284
3285 *target_errno = FILEIO_ENOSYS;
3286 return -1;
3287 }
3288
3289 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3290 Return the number of bytes written, or -1 if an error occurs
3291 (and set *TARGET_ERRNO). */
3292 int
3293 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3294 ULONGEST offset, int *target_errno)
3295 {
3296 struct target_ops *t;
3297
3298 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3299 {
3300 if (t->to_fileio_pwrite != NULL)
3301 {
3302 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3303 target_errno);
3304
3305 if (targetdebug)
3306 fprintf_unfiltered (gdb_stdlog,
3307 "target_fileio_pwrite (%d,...,%d,%s) "
3308 "= %d (%d)\n",
3309 fd, len, pulongest (offset),
3310 ret, ret != -1 ? 0 : *target_errno);
3311 return ret;
3312 }
3313 }
3314
3315 *target_errno = FILEIO_ENOSYS;
3316 return -1;
3317 }
3318
3319 /* Read up to LEN bytes from FD on the target into READ_BUF.
3320 Return the number of bytes read, or -1 if an error occurs
3321 (and set *TARGET_ERRNO). */
3322 int
3323 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3324 ULONGEST offset, int *target_errno)
3325 {
3326 struct target_ops *t;
3327
3328 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3329 {
3330 if (t->to_fileio_pread != NULL)
3331 {
3332 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3333 target_errno);
3334
3335 if (targetdebug)
3336 fprintf_unfiltered (gdb_stdlog,
3337 "target_fileio_pread (%d,...,%d,%s) "
3338 "= %d (%d)\n",
3339 fd, len, pulongest (offset),
3340 ret, ret != -1 ? 0 : *target_errno);
3341 return ret;
3342 }
3343 }
3344
3345 *target_errno = FILEIO_ENOSYS;
3346 return -1;
3347 }
3348
3349 /* Close FD on the target. Return 0, or -1 if an error occurs
3350 (and set *TARGET_ERRNO). */
3351 int
3352 target_fileio_close (int fd, int *target_errno)
3353 {
3354 struct target_ops *t;
3355
3356 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3357 {
3358 if (t->to_fileio_close != NULL)
3359 {
3360 int ret = t->to_fileio_close (fd, target_errno);
3361
3362 if (targetdebug)
3363 fprintf_unfiltered (gdb_stdlog,
3364 "target_fileio_close (%d) = %d (%d)\n",
3365 fd, ret, ret != -1 ? 0 : *target_errno);
3366 return ret;
3367 }
3368 }
3369
3370 *target_errno = FILEIO_ENOSYS;
3371 return -1;
3372 }
3373
3374 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3375 occurs (and set *TARGET_ERRNO). */
3376 int
3377 target_fileio_unlink (const char *filename, int *target_errno)
3378 {
3379 struct target_ops *t;
3380
3381 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3382 {
3383 if (t->to_fileio_unlink != NULL)
3384 {
3385 int ret = t->to_fileio_unlink (filename, target_errno);
3386
3387 if (targetdebug)
3388 fprintf_unfiltered (gdb_stdlog,
3389 "target_fileio_unlink (%s) = %d (%d)\n",
3390 filename, ret, ret != -1 ? 0 : *target_errno);
3391 return ret;
3392 }
3393 }
3394
3395 *target_errno = FILEIO_ENOSYS;
3396 return -1;
3397 }
3398
3399 /* Read value of symbolic link FILENAME on the target. Return a
3400 null-terminated string allocated via xmalloc, or NULL if an error
3401 occurs (and set *TARGET_ERRNO). */
3402 char *
3403 target_fileio_readlink (const char *filename, int *target_errno)
3404 {
3405 struct target_ops *t;
3406
3407 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3408 {
3409 if (t->to_fileio_readlink != NULL)
3410 {
3411 char *ret = t->to_fileio_readlink (filename, target_errno);
3412
3413 if (targetdebug)
3414 fprintf_unfiltered (gdb_stdlog,
3415 "target_fileio_readlink (%s) = %s (%d)\n",
3416 filename, ret? ret : "(nil)",
3417 ret? 0 : *target_errno);
3418 return ret;
3419 }
3420 }
3421
3422 *target_errno = FILEIO_ENOSYS;
3423 return NULL;
3424 }
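
/* Illustrative sketch, not part of the original target.c: resolving a
   symbolic link on the target with target_fileio_readlink.  Error
   handling via TARGET_ERRNO is simplified to a warning here.  */

static void
example_show_link_target (const char *filename)
{
  int target_errno;
  char *linkname = target_fileio_readlink (filename, &target_errno);

  if (linkname == NULL)
    {
      warning (_("could not read link %s (target errno %d)"),
               filename, target_errno);
      return;
    }

  printf_unfiltered ("%s -> %s\n", filename, linkname);
  xfree (linkname);
}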
3425
3426 static void
3427 target_fileio_close_cleanup (void *opaque)
3428 {
3429 int fd = *(int *) opaque;
3430 int target_errno;
3431
3432 target_fileio_close (fd, &target_errno);
3433 }
3434
3435 /* Read target file FILENAME. Store the result in *BUF_P and
3436 return the size of the transferred data. PADDING additional bytes are
3437 available in *BUF_P. This is a helper function for
3438 target_fileio_read_alloc; see the declaration of that function for more
3439 information. */
3440
3441 static LONGEST
3442 target_fileio_read_alloc_1 (const char *filename,
3443 gdb_byte **buf_p, int padding)
3444 {
3445 struct cleanup *close_cleanup;
3446 size_t buf_alloc, buf_pos;
3447 gdb_byte *buf;
3448 LONGEST n;
3449 int fd;
3450 int target_errno;
3451
3452 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3453 if (fd == -1)
3454 return -1;
3455
3456 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3457
3458 /* Start by reading up to 4K at a time. The target will throttle
3459 this number down if necessary. */
3460 buf_alloc = 4096;
3461 buf = xmalloc (buf_alloc);
3462 buf_pos = 0;
3463 while (1)
3464 {
3465 n = target_fileio_pread (fd, &buf[buf_pos],
3466 buf_alloc - buf_pos - padding, buf_pos,
3467 &target_errno);
3468 if (n < 0)
3469 {
3470 /* An error occurred. */
3471 do_cleanups (close_cleanup);
3472 xfree (buf);
3473 return -1;
3474 }
3475 else if (n == 0)
3476 {
3477 /* Read all there was. */
3478 do_cleanups (close_cleanup);
3479 if (buf_pos == 0)
3480 xfree (buf);
3481 else
3482 *buf_p = buf;
3483 return buf_pos;
3484 }
3485
3486 buf_pos += n;
3487
3488 /* If the buffer is filling up, expand it. */
3489 if (buf_alloc < buf_pos * 2)
3490 {
3491 buf_alloc *= 2;
3492 buf = xrealloc (buf, buf_alloc);
3493 }
3494
3495 QUIT;
3496 }
3497 }
3498
3499 /* Read target file FILENAME. Store the result in *BUF_P and return
3500 the size of the transferred data. See the declaration of this
3501 function in "target.h" for more information about the return value. */
3502
3503 LONGEST
3504 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3505 {
3506 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3507 }
3508
3509 /* Read target file FILENAME. The result is NUL-terminated and
3510 returned as a string, allocated using xmalloc. If an error occurs
3511 or the transfer is unsupported, NULL is returned. Empty objects
3512 are returned as allocated but empty strings. A warning is issued
3513 if the result contains any embedded NUL bytes. */
3514
3515 char *
3516 target_fileio_read_stralloc (const char *filename)
3517 {
3518 gdb_byte *buffer;
3519 LONGEST i, transferred;
3520
3521 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3522
3523 if (transferred < 0)
3524 return NULL;
3525
3526 if (transferred == 0)
3527 return xstrdup ("");
3528
3529 buffer[transferred] = 0;
3530
3531 /* Check for embedded NUL bytes; but allow trailing NULs. */
3532 for (i = strlen (buffer); i < transferred; i++)
3533 if (buffer[i] != 0)
3534 {
3535 warning (_("target file %s "
3536 "contained unexpected null characters"),
3537 filename);
3538 break;
3539 }
3540
3541 return (char *) buffer;
3542 }
3543
3544
3545 static int
3546 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3547 {
3548 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
3549 }
3550
3551 static int
3552 default_watchpoint_addr_within_range (struct target_ops *target,
3553 CORE_ADDR addr,
3554 CORE_ADDR start, int length)
3555 {
3556 return addr >= start && addr < start + length;
3557 }
3558
3559 static struct gdbarch *
3560 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3561 {
3562 return target_gdbarch;
3563 }
3564
3565 static int
3566 return_zero (void)
3567 {
3568 return 0;
3569 }
3570
3571 static int
3572 return_one (void)
3573 {
3574 return 1;
3575 }
3576
3577 static int
3578 return_minus_one (void)
3579 {
3580 return -1;
3581 }
3582
3583 /* Find a single runnable target in the stack and return it. If for
3584 some reason there is more than one, return NULL. */
3585
3586 struct target_ops *
3587 find_run_target (void)
3588 {
3589 struct target_ops **t;
3590 struct target_ops *runable = NULL;
3591 int count;
3592
3593 count = 0;
3594
3595 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3596 {
3597 if ((*t)->to_can_run && target_can_run (*t))
3598 {
3599 runable = *t;
3600 ++count;
3601 }
3602 }
3603
3604 return (count == 1 ? runable : NULL);
3605 }
3606
3607 /* Find the next target down the stack from the specified target. */
3610
3611 struct target_ops *
3612 find_target_beneath (struct target_ops *t)
3613 {
3614 return t->beneath;
3615 }
3616
3617 \f
3618 /* The inferior process has died. Long live the inferior! */
3619
3620 void
3621 generic_mourn_inferior (void)
3622 {
3623 ptid_t ptid;
3624
3625 ptid = inferior_ptid;
3626 inferior_ptid = null_ptid;
3627
3628 /* Mark breakpoints uninserted in case something tries to delete a
3629 breakpoint while we delete the inferior's threads (which would
3630 fail, since the inferior is long gone). */
3631 mark_breakpoints_out ();
3632
3633 if (!ptid_equal (ptid, null_ptid))
3634 {
3635 int pid = ptid_get_pid (ptid);
3636 exit_inferior (pid);
3637 }
3638
3639 /* Note this wipes step-resume breakpoints, so needs to be done
3640 after exit_inferior, which ends up referencing the step-resume
3641 breakpoints through clear_thread_inferior_resources. */
3642 breakpoint_init_inferior (inf_exited);
3643
3644 registers_changed ();
3645
3646 reopen_exec_file ();
3647 reinit_frame_cache ();
3648
3649 if (deprecated_detach_hook)
3650 deprecated_detach_hook ();
3651 }
3652 \f
3653 /* Helper function for child_wait and the derivatives of child_wait.
3654 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3655 translation of that in OURSTATUS. */
3656 void
3657 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3658 {
3659 if (WIFEXITED (hoststatus))
3660 {
3661 ourstatus->kind = TARGET_WAITKIND_EXITED;
3662 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3663 }
3664 else if (!WIFSTOPPED (hoststatus))
3665 {
3666 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3667 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3668 }
3669 else
3670 {
3671 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3672 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3673 }
3674 }
3675 \f
3676 /* Convert a normal process ID to a string. Returns the string in a
3677 static buffer. */
3678
3679 char *
3680 normal_pid_to_str (ptid_t ptid)
3681 {
3682 static char buf[32];
3683
3684 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3685 return buf;
3686 }
3687
3688 static char *
3689 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3690 {
3691 return normal_pid_to_str (ptid);
3692 }
3693
3694 /* Error-catcher for target_find_memory_regions. */
3695 static int
3696 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3697 {
3698 error (_("Command not implemented for this target."));
3699 return 0;
3700 }
3701
3702 /* Error-catcher for target_make_corefile_notes. */
3703 static char *
3704 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3705 {
3706 error (_("Command not implemented for this target."));
3707 return NULL;
3708 }
3709
3710 /* Error-catcher for target_get_bookmark. */
3711 static gdb_byte *
3712 dummy_get_bookmark (char *ignore1, int ignore2)
3713 {
3714 tcomplain ();
3715 return NULL;
3716 }
3717
3718 /* Error-catcher for target_goto_bookmark. */
3719 static void
3720 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3721 {
3722 tcomplain ();
3723 }
3724
3725 /* Set up the handful of non-empty slots needed by the dummy target
3726 vector. */
3727
3728 static void
3729 init_dummy_target (void)
3730 {
3731 dummy_target.to_shortname = "None";
3732 dummy_target.to_longname = "None";
3733 dummy_target.to_doc = "";
3734 dummy_target.to_attach = find_default_attach;
3735 dummy_target.to_detach =
3736 (void (*)(struct target_ops *, char *, int))target_ignore;
3737 dummy_target.to_create_inferior = find_default_create_inferior;
3738 dummy_target.to_can_async_p = find_default_can_async_p;
3739 dummy_target.to_is_async_p = find_default_is_async_p;
3740 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3741 dummy_target.to_supports_disable_randomization
3742 = find_default_supports_disable_randomization;
3743 dummy_target.to_pid_to_str = dummy_pid_to_str;
3744 dummy_target.to_stratum = dummy_stratum;
3745 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3746 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3747 dummy_target.to_get_bookmark = dummy_get_bookmark;
3748 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3749 dummy_target.to_xfer_partial = default_xfer_partial;
3750 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3751 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3752 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3753 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3754 dummy_target.to_has_execution
3755 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3756 dummy_target.to_stopped_by_watchpoint = return_zero;
3757 dummy_target.to_stopped_data_address =
3758 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3759 dummy_target.to_magic = OPS_MAGIC;
3760 }
3761 \f
3762 static void
3763 debug_to_open (char *args, int from_tty)
3764 {
3765 debug_target.to_open (args, from_tty);
3766
3767 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3768 }
3769
3770 void
3771 target_close (struct target_ops *targ, int quitting)
3772 {
3773 if (targ->to_xclose != NULL)
3774 targ->to_xclose (targ, quitting);
3775 else if (targ->to_close != NULL)
3776 targ->to_close (quitting);
3777
3778 if (targetdebug)
3779 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3780 }
3781
3782 void
3783 target_attach (char *args, int from_tty)
3784 {
3785 struct target_ops *t;
3786
3787 for (t = current_target.beneath; t != NULL; t = t->beneath)
3788 {
3789 if (t->to_attach != NULL)
3790 {
3791 t->to_attach (t, args, from_tty);
3792 if (targetdebug)
3793 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3794 args, from_tty);
3795 return;
3796 }
3797 }
3798
3799 internal_error (__FILE__, __LINE__,
3800 _("could not find a target to attach"));
3801 }
3802
3803 int
3804 target_thread_alive (ptid_t ptid)
3805 {
3806 struct target_ops *t;
3807
3808 for (t = current_target.beneath; t != NULL; t = t->beneath)
3809 {
3810 if (t->to_thread_alive != NULL)
3811 {
3812 int retval;
3813
3814 retval = t->to_thread_alive (t, ptid);
3815 if (targetdebug)
3816 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3817 PIDGET (ptid), retval);
3818
3819 return retval;
3820 }
3821 }
3822
3823 return 0;
3824 }
3825
3826 void
3827 target_find_new_threads (void)
3828 {
3829 struct target_ops *t;
3830
3831 for (t = current_target.beneath; t != NULL; t = t->beneath)
3832 {
3833 if (t->to_find_new_threads != NULL)
3834 {
3835 t->to_find_new_threads (t);
3836 if (targetdebug)
3837 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3838
3839 return;
3840 }
3841 }
3842 }
3843
3844 void
3845 target_stop (ptid_t ptid)
3846 {
3847 if (!may_stop)
3848 {
3849 warning (_("May not interrupt or stop the target, ignoring attempt"));
3850 return;
3851 }
3852
3853 (*current_target.to_stop) (ptid);
3854 }
3855
3856 static void
3857 debug_to_post_attach (int pid)
3858 {
3859 debug_target.to_post_attach (pid);
3860
3861 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3862 }
3863
3864 /* Return a pretty printed form of target_waitstatus.
3865 Space for the result is malloc'd, caller must free. */
3866
3867 char *
3868 target_waitstatus_to_string (const struct target_waitstatus *ws)
3869 {
3870 const char *kind_str = "status->kind = ";
3871
3872 switch (ws->kind)
3873 {
3874 case TARGET_WAITKIND_EXITED:
3875 return xstrprintf ("%sexited, status = %d",
3876 kind_str, ws->value.integer);
3877 case TARGET_WAITKIND_STOPPED:
3878 return xstrprintf ("%sstopped, signal = %s",
3879 kind_str, target_signal_to_name (ws->value.sig));
3880 case TARGET_WAITKIND_SIGNALLED:
3881 return xstrprintf ("%ssignalled, signal = %s",
3882 kind_str, target_signal_to_name (ws->value.sig));
3883 case TARGET_WAITKIND_LOADED:
3884 return xstrprintf ("%sloaded", kind_str);
3885 case TARGET_WAITKIND_FORKED:
3886 return xstrprintf ("%sforked", kind_str);
3887 case TARGET_WAITKIND_VFORKED:
3888 return xstrprintf ("%svforked", kind_str);
3889 case TARGET_WAITKIND_EXECD:
3890 return xstrprintf ("%sexecd", kind_str);
3891 case TARGET_WAITKIND_SYSCALL_ENTRY:
3892 return xstrprintf ("%sentered syscall", kind_str);
3893 case TARGET_WAITKIND_SYSCALL_RETURN:
3894 return xstrprintf ("%sexited syscall", kind_str);
3895 case TARGET_WAITKIND_SPURIOUS:
3896 return xstrprintf ("%sspurious", kind_str);
3897 case TARGET_WAITKIND_IGNORE:
3898 return xstrprintf ("%signore", kind_str);
3899 case TARGET_WAITKIND_NO_HISTORY:
3900 return xstrprintf ("%sno-history", kind_str);
3901 case TARGET_WAITKIND_NO_RESUMED:
3902 return xstrprintf ("%sno-resumed", kind_str);
3903 default:
3904 return xstrprintf ("%sunknown???", kind_str);
3905 }
3906 }
3907
3908 static void
3909 debug_print_register (const char * func,
3910 struct regcache *regcache, int regno)
3911 {
3912 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3913
3914 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3915 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3916 && gdbarch_register_name (gdbarch, regno) != NULL
3917 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3918 fprintf_unfiltered (gdb_stdlog, "(%s)",
3919 gdbarch_register_name (gdbarch, regno));
3920 else
3921 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3922 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3923 {
3924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3925 int i, size = register_size (gdbarch, regno);
3926 unsigned char buf[MAX_REGISTER_SIZE];
3927
3928 regcache_raw_collect (regcache, regno, buf);
3929 fprintf_unfiltered (gdb_stdlog, " = ");
3930 for (i = 0; i < size; i++)
3931 {
3932 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3933 }
3934 if (size <= sizeof (LONGEST))
3935 {
3936 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3937
3938 fprintf_unfiltered (gdb_stdlog, " %s %s",
3939 core_addr_to_string_nz (val), plongest (val));
3940 }
3941 }
3942 fprintf_unfiltered (gdb_stdlog, "\n");
3943 }
3944
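/* Fetch register REGNO (or all registers, if REGNO is -1) from the
   target into REGCACHE, using the first target on the stack that
   implements to_fetch_registers.  */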
3945 void
3946 target_fetch_registers (struct regcache *regcache, int regno)
3947 {
3948 struct target_ops *t;
3949
3950 for (t = current_target.beneath; t != NULL; t = t->beneath)
3951 {
3952 if (t->to_fetch_registers != NULL)
3953 {
3954 t->to_fetch_registers (t, regcache, regno);
3955 if (targetdebug)
3956 debug_print_register ("target_fetch_registers", regcache, regno);
3957 return;
3958 }
3959 }
3960 }
3961
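/* Write register REGNO (or all registers, if REGNO is -1) from REGCACHE
   back to the target.  Errors out if register writes are disallowed (see
   the "may-write-registers" setting below) or, via noprocess, if no
   target implements to_store_registers.  */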
3962 void
3963 target_store_registers (struct regcache *regcache, int regno)
3964 {
3965 struct target_ops *t;
3966
3967 if (!may_write_registers)
3968 error (_("Writing to registers is not allowed (regno %d)"), regno);
3969
3970 for (t = current_target.beneath; t != NULL; t = t->beneath)
3971 {
3972 if (t->to_store_registers != NULL)
3973 {
3974 t->to_store_registers (t, regcache, regno);
3975 if (targetdebug)
3976 {
3977 debug_print_register ("target_store_registers", regcache, regno);
3978 }
3979 return;
3980 }
3981 }
3982
3983 noprocess ();
3984 }
3985
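/* Return the CPU core that thread PTID was last seen running on, or -1
   if that information is not available.  */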
3986 int
3987 target_core_of_thread (ptid_t ptid)
3988 {
3989 struct target_ops *t;
3990
3991 for (t = current_target.beneath; t != NULL; t = t->beneath)
3992 {
3993 if (t->to_core_of_thread != NULL)
3994 {
3995 int retval = t->to_core_of_thread (t, ptid);
3996
3997 if (targetdebug)
3998 fprintf_unfiltered (gdb_stdlog,
3999 "target_core_of_thread (%d) = %d\n",
4000 PIDGET (ptid), retval);
4001 return retval;
4002 }
4003 }
4004
4005 return -1;
4006 }
4007
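/* Ask the target to compare SIZE bytes of DATA against its memory at
   MEMADDR.  The result from the first target implementing
   to_verify_memory is returned; tcomplain reports an error if no target
   supports the operation.  */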
4008 int
4009 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4010 {
4011 struct target_ops *t;
4012
4013 for (t = current_target.beneath; t != NULL; t = t->beneath)
4014 {
4015 if (t->to_verify_memory != NULL)
4016 {
4017 int retval = t->to_verify_memory (t, data, memaddr, size);
4018
4019 if (targetdebug)
4020 fprintf_unfiltered (gdb_stdlog,
4021 "target_verify_memory (%s, %s) = %d\n",
4022 paddress (target_gdbarch, memaddr),
4023 pulongest (size),
4024 retval);
4025 return retval;
4026 }
4027 }
4028
4029 tcomplain ();
4030 }
4031
4032 /* The documentation for this function is in its prototype declaration in
4033 target.h. */
4034
4035 int
4036 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4037 {
4038 struct target_ops *t;
4039
4040 for (t = current_target.beneath; t != NULL; t = t->beneath)
4041 if (t->to_insert_mask_watchpoint != NULL)
4042 {
4043 int ret;
4044
4045 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4046
4047 if (targetdebug)
4048 fprintf_unfiltered (gdb_stdlog, "\
4049 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4050 core_addr_to_string (addr),
4051 core_addr_to_string (mask), rw, ret);
4052
4053 return ret;
4054 }
4055
4056 return 1;
4057 }
4058
4059 /* The documentation for this function is in its prototype declaration in
4060 target.h. */
4061
4062 int
4063 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4064 {
4065 struct target_ops *t;
4066
4067 for (t = current_target.beneath; t != NULL; t = t->beneath)
4068 if (t->to_remove_mask_watchpoint != NULL)
4069 {
4070 int ret;
4071
4072 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4073
4074 if (targetdebug)
4075 fprintf_unfiltered (gdb_stdlog, "\
4076 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4077 core_addr_to_string (addr),
4078 core_addr_to_string (mask), rw, ret);
4079
4080 return ret;
4081 }
4082
4083 return 1;
4084 }
4085
4086 /* The documentation for this function is in its prototype declaration
4087 in target.h. */
4088
4089 int
4090 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4091 {
4092 struct target_ops *t;
4093
4094 for (t = current_target.beneath; t != NULL; t = t->beneath)
4095 if (t->to_masked_watch_num_registers != NULL)
4096 return t->to_masked_watch_num_registers (t, addr, mask);
4097
4098 return -1;
4099 }
4100
4101 /* The documentation for this function is in its prototype declaration
4102 in target.h. */
4103
4104 int
4105 target_ranged_break_num_registers (void)
4106 {
4107 struct target_ops *t;
4108
4109 for (t = current_target.beneath; t != NULL; t = t->beneath)
4110 if (t->to_ranged_break_num_registers != NULL)
4111 return t->to_ranged_break_num_registers (t);
4112
4113 return -1;
4114 }
4115
4116 static void
4117 debug_to_prepare_to_store (struct regcache *regcache)
4118 {
4119 debug_target.to_prepare_to_store (regcache);
4120
4121 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4122 }
4123
4124 static int
4125 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4126 int write, struct mem_attrib *attrib,
4127 struct target_ops *target)
4128 {
4129 int retval;
4130
4131 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4132 attrib, target);
4133
4134 fprintf_unfiltered (gdb_stdlog,
4135 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4136 paddress (target_gdbarch, memaddr), len,
4137 write ? "write" : "read", retval);
4138
4139 if (retval > 0)
4140 {
4141 int i;
4142
4143 fputs_unfiltered (", bytes =", gdb_stdlog);
4144 for (i = 0; i < retval; i++)
4145 {
4146 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4147 {
4148 if (targetdebug < 2 && i > 0)
4149 {
4150 fprintf_unfiltered (gdb_stdlog, " ...");
4151 break;
4152 }
4153 fprintf_unfiltered (gdb_stdlog, "\n");
4154 }
4155
4156 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4157 }
4158 }
4159
4160 fputc_unfiltered ('\n', gdb_stdlog);
4161
4162 return retval;
4163 }
4164
4165 static void
4166 debug_to_files_info (struct target_ops *target)
4167 {
4168 debug_target.to_files_info (target);
4169
4170 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4171 }
4172
4173 static int
4174 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4175 struct bp_target_info *bp_tgt)
4176 {
4177 int retval;
4178
4179 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4180
4181 fprintf_unfiltered (gdb_stdlog,
4182 "target_insert_breakpoint (%s, xxx) = %ld\n",
4183 core_addr_to_string (bp_tgt->placed_address),
4184 (unsigned long) retval);
4185 return retval;
4186 }
4187
4188 static int
4189 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4190 struct bp_target_info *bp_tgt)
4191 {
4192 int retval;
4193
4194 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4195
4196 fprintf_unfiltered (gdb_stdlog,
4197 "target_remove_breakpoint (%s, xxx) = %ld\n",
4198 core_addr_to_string (bp_tgt->placed_address),
4199 (unsigned long) retval);
4200 return retval;
4201 }
4202
4203 static int
4204 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4205 {
4206 int retval;
4207
4208 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4209
4210 fprintf_unfiltered (gdb_stdlog,
4211 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4212 (unsigned long) type,
4213 (unsigned long) cnt,
4214 (unsigned long) from_tty,
4215 (unsigned long) retval);
4216 return retval;
4217 }
4218
4219 static int
4220 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4221 {
4222 int retval;
4223
4224 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4225
4226 fprintf_unfiltered (gdb_stdlog,
4227 "target_region_ok_for_hw_watchpoint (%s, %ld) = %d\n",
4228 core_addr_to_string (addr), (unsigned long) len,
4229 retval);
4230 return retval;
4231 }
4232
4233 static int
4234 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4235 struct expression *cond)
4236 {
4237 int retval;
4238
4239 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4240 rw, cond);
4241
4242 fprintf_unfiltered (gdb_stdlog,
4243 "target_can_accel_watchpoint_condition "
4244 "(%s, %d, %d, %s) = %ld\n",
4245 core_addr_to_string (addr), len, rw,
4246 host_address_to_string (cond), (unsigned long) retval);
4247 return retval;
4248 }
4249
4250 static int
4251 debug_to_stopped_by_watchpoint (void)
4252 {
4253 int retval;
4254
4255 retval = debug_target.to_stopped_by_watchpoint ();
4256
4257 fprintf_unfiltered (gdb_stdlog,
4258 "target_stopped_by_watchpoint () = %ld\n",
4259 (unsigned long) retval);
4260 return retval;
4261 }
4262
4263 static int
4264 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4265 {
4266 int retval;
4267
4268 retval = debug_target.to_stopped_data_address (target, addr);
4269
4270 fprintf_unfiltered (gdb_stdlog,
4271 "target_stopped_data_address ([%s]) = %ld\n",
4272 core_addr_to_string (*addr),
4273 (unsigned long)retval);
4274 return retval;
4275 }
4276
4277 static int
4278 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4279 CORE_ADDR addr,
4280 CORE_ADDR start, int length)
4281 {
4282 int retval;
4283
4284 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4285 start, length);
4286
4287 fprintf_filtered (gdb_stdlog,
4288 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4289 core_addr_to_string (addr), core_addr_to_string (start),
4290 length, retval);
4291 return retval;
4292 }
4293
4294 static int
4295 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4296 struct bp_target_info *bp_tgt)
4297 {
4298 int retval;
4299
4300 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4301
4302 fprintf_unfiltered (gdb_stdlog,
4303 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4304 core_addr_to_string (bp_tgt->placed_address),
4305 (unsigned long) retval);
4306 return retval;
4307 }
4308
4309 static int
4310 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4311 struct bp_target_info *bp_tgt)
4312 {
4313 int retval;
4314
4315 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4316
4317 fprintf_unfiltered (gdb_stdlog,
4318 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4319 core_addr_to_string (bp_tgt->placed_address),
4320 (unsigned long) retval);
4321 return retval;
4322 }
4323
4324 static int
4325 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4326 struct expression *cond)
4327 {
4328 int retval;
4329
4330 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4331
4332 fprintf_unfiltered (gdb_stdlog,
4333 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4334 core_addr_to_string (addr), len, type,
4335 host_address_to_string (cond), (unsigned long) retval);
4336 return retval;
4337 }
4338
4339 static int
4340 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4341 struct expression *cond)
4342 {
4343 int retval;
4344
4345 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4346
4347 fprintf_unfiltered (gdb_stdlog,
4348 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4349 core_addr_to_string (addr), len, type,
4350 host_address_to_string (cond), (unsigned long) retval);
4351 return retval;
4352 }
4353
4354 static void
4355 debug_to_terminal_init (void)
4356 {
4357 debug_target.to_terminal_init ();
4358
4359 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4360 }
4361
4362 static void
4363 debug_to_terminal_inferior (void)
4364 {
4365 debug_target.to_terminal_inferior ();
4366
4367 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4368 }
4369
4370 static void
4371 debug_to_terminal_ours_for_output (void)
4372 {
4373 debug_target.to_terminal_ours_for_output ();
4374
4375 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4376 }
4377
4378 static void
4379 debug_to_terminal_ours (void)
4380 {
4381 debug_target.to_terminal_ours ();
4382
4383 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4384 }
4385
4386 static void
4387 debug_to_terminal_save_ours (void)
4388 {
4389 debug_target.to_terminal_save_ours ();
4390
4391 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4392 }
4393
4394 static void
4395 debug_to_terminal_info (char *arg, int from_tty)
4396 {
4397 debug_target.to_terminal_info (arg, from_tty);
4398
4399 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4400 from_tty);
4401 }
4402
4403 static void
4404 debug_to_load (char *args, int from_tty)
4405 {
4406 debug_target.to_load (args, from_tty);
4407
4408 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4409 }
4410
4411 static void
4412 debug_to_post_startup_inferior (ptid_t ptid)
4413 {
4414 debug_target.to_post_startup_inferior (ptid);
4415
4416 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4417 PIDGET (ptid));
4418 }
4419
4420 static int
4421 debug_to_insert_fork_catchpoint (int pid)
4422 {
4423 int retval;
4424
4425 retval = debug_target.to_insert_fork_catchpoint (pid);
4426
4427 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4428 pid, retval);
4429
4430 return retval;
4431 }
4432
4433 static int
4434 debug_to_remove_fork_catchpoint (int pid)
4435 {
4436 int retval;
4437
4438 retval = debug_target.to_remove_fork_catchpoint (pid);
4439
4440 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4441 pid, retval);
4442
4443 return retval;
4444 }
4445
4446 static int
4447 debug_to_insert_vfork_catchpoint (int pid)
4448 {
4449 int retval;
4450
4451 retval = debug_target.to_insert_vfork_catchpoint (pid);
4452
4453 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4454 pid, retval);
4455
4456 return retval;
4457 }
4458
4459 static int
4460 debug_to_remove_vfork_catchpoint (int pid)
4461 {
4462 int retval;
4463
4464 retval = debug_target.to_remove_vfork_catchpoint (pid);
4465
4466 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4467 pid, retval);
4468
4469 return retval;
4470 }
4471
4472 static int
4473 debug_to_insert_exec_catchpoint (int pid)
4474 {
4475 int retval;
4476
4477 retval = debug_target.to_insert_exec_catchpoint (pid);
4478
4479 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4480 pid, retval);
4481
4482 return retval;
4483 }
4484
4485 static int
4486 debug_to_remove_exec_catchpoint (int pid)
4487 {
4488 int retval;
4489
4490 retval = debug_target.to_remove_exec_catchpoint (pid);
4491
4492 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4493 pid, retval);
4494
4495 return retval;
4496 }
4497
4498 static int
4499 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4500 {
4501 int has_exited;
4502
4503 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4504
4505 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4506 pid, wait_status, *exit_status, has_exited);
4507
4508 return has_exited;
4509 }
4510
4511 static int
4512 debug_to_can_run (void)
4513 {
4514 int retval;
4515
4516 retval = debug_target.to_can_run ();
4517
4518 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4519
4520 return retval;
4521 }
4522
4523 static struct gdbarch *
4524 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4525 {
4526 struct gdbarch *retval;
4527
4528 retval = debug_target.to_thread_architecture (ops, ptid);
4529
4530 fprintf_unfiltered (gdb_stdlog,
4531 "target_thread_architecture (%s) = %s [%s]\n",
4532 target_pid_to_str (ptid),
4533 host_address_to_string (retval),
4534 gdbarch_bfd_arch_info (retval)->printable_name);
4535 return retval;
4536 }
4537
4538 static void
4539 debug_to_stop (ptid_t ptid)
4540 {
4541 debug_target.to_stop (ptid);
4542
4543 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4544 target_pid_to_str (ptid));
4545 }
4546
4547 static void
4548 debug_to_rcmd (char *command,
4549 struct ui_file *outbuf)
4550 {
4551 debug_target.to_rcmd (command, outbuf);
4552 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4553 }
4554
4555 static char *
4556 debug_to_pid_to_exec_file (int pid)
4557 {
4558 char *exec_file;
4559
4560 exec_file = debug_target.to_pid_to_exec_file (pid);
4561
4562 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4563 pid, exec_file);
4564
4565 return exec_file;
4566 }
4567
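/* Install the debug_to_* wrappers: save a copy of the current target
   vector in debug_target, then redirect selected current_target methods
   to the wrappers above, each of which calls the saved method and logs
   the call to gdb_stdlog.  */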
4568 static void
4569 setup_target_debug (void)
4570 {
4571 memcpy (&debug_target, &current_target, sizeof debug_target);
4572
4573 current_target.to_open = debug_to_open;
4574 current_target.to_post_attach = debug_to_post_attach;
4575 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4576 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4577 current_target.to_files_info = debug_to_files_info;
4578 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4579 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4580 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4581 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4582 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4583 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4584 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4585 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4586 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4587 current_target.to_watchpoint_addr_within_range
4588 = debug_to_watchpoint_addr_within_range;
4589 current_target.to_region_ok_for_hw_watchpoint
4590 = debug_to_region_ok_for_hw_watchpoint;
4591 current_target.to_can_accel_watchpoint_condition
4592 = debug_to_can_accel_watchpoint_condition;
4593 current_target.to_terminal_init = debug_to_terminal_init;
4594 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4595 current_target.to_terminal_ours_for_output
4596 = debug_to_terminal_ours_for_output;
4597 current_target.to_terminal_ours = debug_to_terminal_ours;
4598 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4599 current_target.to_terminal_info = debug_to_terminal_info;
4600 current_target.to_load = debug_to_load;
4601 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4602 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4603 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4604 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4605 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4606 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4607 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4608 current_target.to_has_exited = debug_to_has_exited;
4609 current_target.to_can_run = debug_to_can_run;
4610 current_target.to_stop = debug_to_stop;
4611 current_target.to_rcmd = debug_to_rcmd;
4612 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4613 current_target.to_thread_architecture = debug_to_thread_architecture;
4614 }
4615 \f
4616
4617 static char targ_desc[] =
4618 "Names of targets and files being debugged.\nShows the entire \
4619 stack of targets currently in use (including the exec-file,\n\
4620 core-file, and process, if any), as well as the symbol file name.";
4621
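/* Implement the "monitor" command: forward CMD to the remote monitor
   via target_rcmd, or error out if neither the current target nor the
   target wrapped by the debug layer provides to_rcmd.  */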
4622 static void
4623 do_monitor_command (char *cmd,
4624 int from_tty)
4625 {
4626 if ((current_target.to_rcmd
4627 == (void (*) (char *, struct ui_file *)) tcomplain)
4628 || (current_target.to_rcmd == debug_to_rcmd
4629 && (debug_target.to_rcmd
4630 == (void (*) (char *, struct ui_file *)) tcomplain)))
4631 error (_("\"monitor\" command not supported by this target."));
4632 target_rcmd (cmd, gdb_stdtarg);
4633 }
4634
4635 /* Print the name of each layer of our target stack.  */
4636
4637 static void
4638 maintenance_print_target_stack (char *cmd, int from_tty)
4639 {
4640 struct target_ops *t;
4641
4642 printf_filtered (_("The current target stack is:\n"));
4643
4644 for (t = target_stack; t != NULL; t = t->beneath)
4645 {
4646 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4647 }
4648 }
4649
4650 /* Controls whether async mode is permitted.  */
4651 int target_async_permitted = 0;
4652
4653 /* The set command writes to this variable. If the inferior is
4654 executing, target_async_permitted is *not* updated.  */
4655 static int target_async_permitted_1 = 0;
4656
4657 static void
4658 set_maintenance_target_async_permitted (char *args, int from_tty,
4659 struct cmd_list_element *c)
4660 {
4661 if (have_live_inferiors ())
4662 {
4663 target_async_permitted_1 = target_async_permitted;
4664 error (_("Cannot change this setting while the inferior is running."));
4665 }
4666
4667 target_async_permitted = target_async_permitted_1;
4668 }
4669
4670 static void
4671 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
4672 struct cmd_list_element *c,
4673 const char *value)
4674 {
4675 fprintf_filtered (file,
4676 _("Controlling the inferior in "
4677 "asynchronous mode is %s.\n"), value);
4678 }
4679
4680 /* Temporary copies of permission settings. */
4681
4682 static int may_write_registers_1 = 1;
4683 static int may_write_memory_1 = 1;
4684 static int may_insert_breakpoints_1 = 1;
4685 static int may_insert_tracepoints_1 = 1;
4686 static int may_insert_fast_tracepoints_1 = 1;
4687 static int may_stop_1 = 1;
4688
4689 /* Make the user-set values match the real values again. */
4690
4691 void
4692 update_target_permissions (void)
4693 {
4694 may_write_registers_1 = may_write_registers;
4695 may_write_memory_1 = may_write_memory;
4696 may_insert_breakpoints_1 = may_insert_breakpoints;
4697 may_insert_tracepoints_1 = may_insert_tracepoints;
4698 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4699 may_stop_1 = may_stop;
4700 }
4701
4702 /* This one function handles (most of) the permission flags in the same
4703 way. */
4704
4705 static void
4706 set_target_permissions (char *args, int from_tty,
4707 struct cmd_list_element *c)
4708 {
4709 if (target_has_execution)
4710 {
4711 update_target_permissions ();
4712 error (_("Cannot change this setting while the inferior is running."));
4713 }
4714
4715 /* Make the real values match the user-changed values. */
4716 may_write_registers = may_write_registers_1;
4717 may_insert_breakpoints = may_insert_breakpoints_1;
4718 may_insert_tracepoints = may_insert_tracepoints_1;
4719 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4720 may_stop = may_stop_1;
4721 update_observer_mode ();
4722 }
4723
4724 /* Set memory write permission independently of observer mode. */
4725
4726 static void
4727 set_write_memory_permission (char *args, int from_tty,
4728 struct cmd_list_element *c)
4729 {
4730 /* Make the real values match the user-changed values. */
4731 may_write_memory = may_write_memory_1;
4732 update_observer_mode ();
4733 }
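/* These permission flags are exposed to the user through the "set may-*"
   commands registered in initialize_targets below, for example
   (illustrative session):

     (gdb) set may-write-memory off
     (gdb) set may-insert-breakpoints off

   With a setting turned off, the corresponding operation is rejected
   with an error (or, for "may-interrupt", silently ignored) until the
   setting is turned back on.  */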
4734
4735
4736 void
4737 initialize_targets (void)
4738 {
4739 init_dummy_target ();
4740 push_target (&dummy_target);
4741
4742 add_info ("target", target_info, targ_desc);
4743 add_info ("files", target_info, targ_desc);
4744
4745 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4746 Set target debugging."), _("\
4747 Show target debugging."), _("\
4748 When non-zero, target debugging is enabled. Higher numbers are more\n\
4749 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4750 command."),
4751 NULL,
4752 show_targetdebug,
4753 &setdebuglist, &showdebuglist);
4754
4755 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4756 &trust_readonly, _("\
4757 Set mode for reading from readonly sections."), _("\
4758 Show mode for reading from readonly sections."), _("\
4759 When this mode is on, memory reads from readonly sections (such as .text)\n\
4760 will be read from the object file instead of from the target. This will\n\
4761 result in significant performance improvement for remote targets."),
4762 NULL,
4763 show_trust_readonly,
4764 &setlist, &showlist);
4765
4766 add_com ("monitor", class_obscure, do_monitor_command,
4767 _("Send a command to the remote monitor (remote targets only)."));
4768
4769 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4770 _("Print the name of each layer of the internal target stack."),
4771 &maintenanceprintlist);
4772
4773 add_setshow_boolean_cmd ("target-async", no_class,
4774 &target_async_permitted_1, _("\
4775 Set whether gdb controls the inferior in asynchronous mode."), _("\
4776 Show whether gdb controls the inferior in asynchronous mode."), _("\
4777 Tells gdb whether to control the inferior in asynchronous mode."),
4778 set_maintenance_target_async_permitted,
4779 show_maintenance_target_async_permitted,
4780 &setlist,
4781 &showlist);
4782
4783 add_setshow_boolean_cmd ("stack-cache", class_support,
4784 &stack_cache_enabled_p_1, _("\
4785 Set cache use for stack access."), _("\
4786 Show cache use for stack access."), _("\
4787 When on, use the data cache for all stack access, regardless of any\n\
4788 configured memory regions. This improves remote performance significantly.\n\
4789 By default, caching for stack access is on."),
4790 set_stack_cache_enabled_p,
4791 show_stack_cache_enabled_p,
4792 &setlist, &showlist);
4793
4794 add_setshow_boolean_cmd ("may-write-registers", class_support,
4795 &may_write_registers_1, _("\
4796 Set permission to write into registers."), _("\
4797 Show permission to write into registers."), _("\
4798 When this permission is on, GDB may write into the target's registers.\n\
4799 Otherwise, any sort of write attempt will result in an error."),
4800 set_target_permissions, NULL,
4801 &setlist, &showlist);
4802
4803 add_setshow_boolean_cmd ("may-write-memory", class_support,
4804 &may_write_memory_1, _("\
4805 Set permission to write into target memory."), _("\
4806 Show permission to write into target memory."), _("\
4807 When this permission is on, GDB may write into the target's memory.\n\
4808 Otherwise, any sort of write attempt will result in an error."),
4809 set_write_memory_permission, NULL,
4810 &setlist, &showlist);
4811
4812 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4813 &may_insert_breakpoints_1, _("\
4814 Set permission to insert breakpoints in the target."), _("\
4815 Show permission to insert breakpoints in the target."), _("\
4816 When this permission is on, GDB may insert breakpoints in the program.\n\
4817 Otherwise, any sort of insertion attempt will result in an error."),
4818 set_target_permissions, NULL,
4819 &setlist, &showlist);
4820
4821 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4822 &may_insert_tracepoints_1, _("\
4823 Set permission to insert tracepoints in the target."), _("\
4824 Show permission to insert tracepoints in the target."), _("\
4825 When this permission is on, GDB may insert tracepoints in the program.\n\
4826 Otherwise, any sort of insertion attempt will result in an error."),
4827 set_target_permissions, NULL,
4828 &setlist, &showlist);
4829
4830 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4831 &may_insert_fast_tracepoints_1, _("\
4832 Set permission to insert fast tracepoints in the target."), _("\
4833 Show permission to insert fast tracepoints in the target."), _("\
4834 When this permission is on, GDB may insert fast tracepoints.\n\
4835 Otherwise, any sort of insertion attempt will result in an error."),
4836 set_target_permissions, NULL,
4837 &setlist, &showlist);
4838
4839 add_setshow_boolean_cmd ("may-interrupt", class_support,
4840 &may_stop_1, _("\
4841 Set permission to interrupt or signal the target."), _("\
4842 Show permission to interrupt or signal the target."), _("\
4843 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4844 Otherwise, any attempt to interrupt or stop will be ignored."),
4845 set_target_permissions, NULL,
4846 &setlist, &showlist);
4847
4848
4849 target_dcache = dcache_init ();
4850 }