1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46
47 static void target_info (char *, int);
48
49 static void kill_or_be_killed (int);
50
51 static void default_terminal_info (char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
57
58 static int nosymbol (char *, CORE_ADDR *);
59
60 static void tcomplain (void) ATTR_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static void nosupport_runtime (void);
77
78 static LONGEST default_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST current_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex, gdb_byte *readbuf,
87 const gdb_byte *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static LONGEST target_xfer_partial (struct target_ops *ops,
91 enum target_object object,
92 const char *annex,
93 void *readbuf, const void *writebuf,
94 ULONGEST offset, LONGEST len);
95
96 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
97 ptid_t ptid);
98
99 static void init_dummy_target (void);
100
101 static struct target_ops debug_target;
102
103 static void debug_to_open (char *, int);
104
105 static void debug_to_prepare_to_store (struct regcache *);
106
107 static void debug_to_files_info (struct target_ops *);
108
109 static int debug_to_insert_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_remove_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_can_use_hw_breakpoint (int, int, int);
116
117 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
121 struct bp_target_info *);
122
123 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
124
125 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
126
127 static int debug_to_stopped_by_watchpoint (void);
128
129 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
130
131 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
132 CORE_ADDR, CORE_ADDR, int);
133
134 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
151
152 static int debug_to_can_run (void);
153
154 static void debug_to_notice_signals (ptid_t);
155
156 static void debug_to_stop (ptid_t);
157
158 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
159 weird and mysterious ways. Putting the variable here lets those
160 weird and mysterious ways keep building while they are being
161 converted to the inferior inheritance structure. */
162 struct target_ops deprecated_child_ops;
163
164 /* Pointer to array of target architecture structures; the size of the
165 array; the current index into the array; the allocated size of the
166 array. */
167 struct target_ops **target_structs;
168 unsigned target_struct_size;
169 unsigned target_struct_index;
170 unsigned target_struct_allocsize;
171 #define DEFAULT_ALLOCSIZE 10
172
173 /* The initial current target, so that there is always a semi-valid
174 current target. */
175
176 static struct target_ops dummy_target;
177
178 /* Top of target stack. */
179
180 static struct target_ops *target_stack;
181
182 /* The target structure we are currently using to talk to a process
183 or file or whatever "inferior" we have. */
184
185 struct target_ops current_target;
186
187 /* Command list for target. */
188
189 static struct cmd_list_element *targetlist = NULL;
190
191 /* Nonzero if we should trust readonly sections from the
192 executable when reading memory. */
193
194 static int trust_readonly = 0;
195
196 /* Nonzero if we should show true memory content including
197 memory breakpoints inserted by GDB. */
198
199 static int show_memory_breakpoints = 0;
200
201 /* Nonzero if we want to see tracing of target-level operations. */
202
203 static int targetdebug = 0;
204 static void
205 show_targetdebug (struct ui_file *file, int from_tty,
206 struct cmd_list_element *c, const char *value)
207 {
208 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
209 }
210
211 static void setup_target_debug (void);
212
213 /* The "set stack-cache" option sets this. */
214 static int stack_cache_enabled_p_1 = 1;
215 /* And set_stack_cache_enabled_p updates this.
216 The reason for the separation is so that we don't flush the cache for
217 on->on transitions. */
218 static int stack_cache_enabled_p = 1;
219
220 /* This is called *after* the stack-cache has been set.
221 Flush the cache for off->on and on->off transitions.
222 There's no real need to flush the cache for on->off transitions,
223 except cleanliness. */
224
225 static void
226 set_stack_cache_enabled_p (char *args, int from_tty,
227 struct cmd_list_element *c)
228 {
229 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
230 target_dcache_invalidate ();
231
232 stack_cache_enabled_p = stack_cache_enabled_p_1;
233 }
234
235 static void
236 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
237 struct cmd_list_element *c, const char *value)
238 {
239 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
240 }
241
242 /* Cache of memory operations, to speed up remote access. */
243 static DCACHE *target_dcache;
244
245 /* Invalidate the target dcache. */
246
247 void
248 target_dcache_invalidate (void)
249 {
250 dcache_invalidate (target_dcache);
251 }
252
253 /* The user just typed 'target' without the name of a target. */
254
255 static void
256 target_command (char *arg, int from_tty)
257 {
258 fputs_filtered ("Argument required (target name). Try `help target'\n",
259 gdb_stdout);
260 }
261
262 /* Default target_has_* methods for process_stratum targets. */
263
264 int
265 default_child_has_all_memory (struct target_ops *ops)
266 {
267 /* If no inferior selected, then we can't read memory here. */
268 if (ptid_equal (inferior_ptid, null_ptid))
269 return 0;
270
271 return 1;
272 }
273
274 int
275 default_child_has_memory (struct target_ops *ops)
276 {
277 /* If no inferior selected, then we can't read memory here. */
278 if (ptid_equal (inferior_ptid, null_ptid))
279 return 0;
280
281 return 1;
282 }
283
284 int
285 default_child_has_stack (struct target_ops *ops)
286 {
287 /* If no inferior selected, there's no stack. */
288 if (ptid_equal (inferior_ptid, null_ptid))
289 return 0;
290
291 return 1;
292 }
293
294 int
295 default_child_has_registers (struct target_ops *ops)
296 {
297 /* Can't read registers from no inferior. */
298 if (ptid_equal (inferior_ptid, null_ptid))
299 return 0;
300
301 return 1;
302 }
303
304 int
305 default_child_has_execution (struct target_ops *ops)
306 {
307 /* If there's no thread selected, then we can't make it run through
308 hoops. */
309 if (ptid_equal (inferior_ptid, null_ptid))
310 return 0;
311
312 return 1;
313 }
314
315
316 int
317 target_has_all_memory_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_all_memory (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_memory_1 (void)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_memory (t))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_stack_1 (void)
342 {
343 struct target_ops *t;
344
345 for (t = current_target.beneath; t != NULL; t = t->beneath)
346 if (t->to_has_stack (t))
347 return 1;
348
349 return 0;
350 }
351
352 int
353 target_has_registers_1 (void)
354 {
355 struct target_ops *t;
356
357 for (t = current_target.beneath; t != NULL; t = t->beneath)
358 if (t->to_has_registers (t))
359 return 1;
360
361 return 0;
362 }
363
364 int
365 target_has_execution_1 (void)
366 {
367 struct target_ops *t;
368
369 for (t = current_target.beneath; t != NULL; t = t->beneath)
370 if (t->to_has_execution (t))
371 return 1;
372
373 return 0;
374 }
375
376 /* Add a possible target architecture to the list. */
377
378 void
379 add_target (struct target_ops *t)
380 {
381 /* Provide default values for all "must have" methods. */
382 if (t->to_xfer_partial == NULL)
383 t->to_xfer_partial = default_xfer_partial;
384
385 if (t->to_has_all_memory == NULL)
386 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
387
388 if (t->to_has_memory == NULL)
389 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
390
391 if (t->to_has_stack == NULL)
392 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
393
394 if (t->to_has_registers == NULL)
395 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
396
397 if (t->to_has_execution == NULL)
398 t->to_has_execution = (int (*) (struct target_ops *)) return_zero;
399
400 if (!target_structs)
401 {
402 target_struct_allocsize = DEFAULT_ALLOCSIZE;
403 target_structs = (struct target_ops **) xmalloc
404 (target_struct_allocsize * sizeof (*target_structs));
405 }
406 if (target_struct_size >= target_struct_allocsize)
407 {
408 target_struct_allocsize *= 2;
409 target_structs = (struct target_ops **)
410 xrealloc ((char *) target_structs,
411 target_struct_allocsize * sizeof (*target_structs));
412 }
413 target_structs[target_struct_size++] = t;
414
415 if (targetlist == NULL)
416 add_prefix_cmd ("target", class_run, target_command, _("\
417 Connect to a target machine or process.\n\
418 The first argument is the type or protocol of the target machine.\n\
419 Remaining arguments are interpreted by the target protocol. For more\n\
420 information on the arguments for a particular protocol, type\n\
421 `help target ' followed by the protocol name."),
422 &targetlist, "target ", 0, &cmdlist);
423 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
424 }
425
426 /* Stub functions */
427
428 void
429 target_ignore (void)
430 {
431 }
432
433 void
434 target_kill (void)
435 {
436 struct target_ops *t;
437
438 for (t = current_target.beneath; t != NULL; t = t->beneath)
439 if (t->to_kill != NULL)
440 {
441 if (targetdebug)
442 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
443
444 t->to_kill (t);
445 return;
446 }
447
448 noprocess ();
449 }
450
451 void
452 target_load (char *arg, int from_tty)
453 {
454 target_dcache_invalidate ();
455 (*current_target.to_load) (arg, from_tty);
456 }
457
458 void
459 target_create_inferior (char *exec_file, char *args,
460 char **env, int from_tty)
461 {
462 struct target_ops *t;
463 for (t = current_target.beneath; t != NULL; t = t->beneath)
464 {
465 if (t->to_create_inferior != NULL)
466 {
467 t->to_create_inferior (t, exec_file, args, env, from_tty);
468 if (targetdebug)
469 fprintf_unfiltered (gdb_stdlog,
470 "target_create_inferior (%s, %s, xxx, %d)\n",
471 exec_file, args, from_tty);
472 return;
473 }
474 }
475
476 internal_error (__FILE__, __LINE__,
477 "could not find a target to create inferior");
478 }
479
480 void
481 target_terminal_inferior (void)
482 {
483 /* A background resume (``run&'') should leave GDB in control of the
484 terminal. */
485 if (target_is_async_p () && !sync_execution)
486 return;
487
488 /* If GDB is resuming the inferior in the foreground, install
489 inferior's terminal modes. */
490 (*current_target.to_terminal_inferior) ();
491 }
492
493 static int
494 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
495 struct target_ops *t)
496 {
497 errno = EIO; /* Can't read/write this location */
498 return 0; /* No bytes handled */
499 }
500
501 static void
502 tcomplain (void)
503 {
504 error (_("You can't do that when your target is `%s'"),
505 current_target.to_shortname);
506 }
507
508 void
509 noprocess (void)
510 {
511 error (_("You can't do that without a process to debug."));
512 }
513
514 static int
515 nosymbol (char *name, CORE_ADDR *addrp)
516 {
517 return 1; /* Symbol does not exist in target env */
518 }
519
520 static void
521 nosupport_runtime (void)
522 {
523 if (ptid_equal (inferior_ptid, null_ptid))
524 noprocess ();
525 else
526 error (_("No run-time support for this"));
527 }
528
529
530 static void
531 default_terminal_info (char *args, int from_tty)
532 {
533 printf_unfiltered (_("No saved terminal information.\n"));
534 }
535
536 /* This is the default target_create_inferior and target_attach function.
537 If the current target is executing, it asks whether to kill it off.
538 If this function returns without calling error(), it has killed off
539 the target, and the operation should be attempted. */
540
541 static void
542 kill_or_be_killed (int from_tty)
543 {
544 if (target_has_execution)
545 {
546 printf_unfiltered (_("You are already running a program:\n"));
547 target_files_info ();
548 if (query (_("Kill it? ")))
549 {
550 target_kill ();
551 if (target_has_execution)
552 error (_("Killing the program did not help."));
553 return;
554 }
555 else
556 {
557 error (_("Program not killed."));
558 }
559 }
560 tcomplain ();
561 }
562
563 /* A default implementation for the to_get_ada_task_ptid target method.
564
565 This function builds the PTID by using both LWP and TID as part of
566 the PTID lwp and tid elements. The pid used is the pid of the
567 inferior_ptid. */
568
569 static ptid_t
570 default_get_ada_task_ptid (long lwp, long tid)
571 {
572 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
573 }
574
575 /* Go through the target stack from top to bottom, copying over zero
576 entries in current_target, then filling in still empty entries. In
577 effect, we are doing class inheritance through the pushed target
578 vectors.
579
580 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
581 is currently implemented, is that it discards any knowledge of
582 which target an inherited method originally belonged to.
583 Consequently, new target methods should instead explicitly and
584 locally search the target stack for the target that can handle the
585 request. */
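/* (A sketch of that style, mirroring target_kill above; "to_frob" is a
   placeholder for whatever method is being looked up:

     struct target_ops *t;

     for (t = current_target.beneath; t != NULL; t = t->beneath)
       if (t->to_frob != NULL)
         {
           t->to_frob (t);
           return;
         }
     noprocess ();

   The walk stops at the first target that actually implements the
   method.)  */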
586
587 static void
588 update_current_target (void)
589 {
590 struct target_ops *t;
591
592 /* First, reset current's contents. */
593 memset (&current_target, 0, sizeof (current_target));
594
595 #define INHERIT(FIELD, TARGET) \
596 if (!current_target.FIELD) \
597 current_target.FIELD = (TARGET)->FIELD
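/* For instance, INHERIT (to_files_info, t) in the loop below expands to

     if (!current_target.to_files_info)
       current_target.to_files_info = (t)->to_files_info;

   so the topmost target on the stack that supplies a method is the one
   current_target ends up using.  */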
598
599 for (t = target_stack; t; t = t->beneath)
600 {
601 INHERIT (to_shortname, t);
602 INHERIT (to_longname, t);
603 INHERIT (to_doc, t);
604 /* Do not inherit to_open. */
605 /* Do not inherit to_close. */
606 /* Do not inherit to_attach. */
607 INHERIT (to_post_attach, t);
608 INHERIT (to_attach_no_wait, t);
609 /* Do not inherit to_detach. */
610 /* Do not inherit to_disconnect. */
611 /* Do not inherit to_resume. */
612 /* Do not inherit to_wait. */
613 /* Do not inherit to_fetch_registers. */
614 /* Do not inherit to_store_registers. */
615 INHERIT (to_prepare_to_store, t);
616 INHERIT (deprecated_xfer_memory, t);
617 INHERIT (to_files_info, t);
618 INHERIT (to_insert_breakpoint, t);
619 INHERIT (to_remove_breakpoint, t);
620 INHERIT (to_can_use_hw_breakpoint, t);
621 INHERIT (to_insert_hw_breakpoint, t);
622 INHERIT (to_remove_hw_breakpoint, t);
623 INHERIT (to_insert_watchpoint, t);
624 INHERIT (to_remove_watchpoint, t);
625 INHERIT (to_stopped_data_address, t);
626 INHERIT (to_have_steppable_watchpoint, t);
627 INHERIT (to_have_continuable_watchpoint, t);
628 INHERIT (to_stopped_by_watchpoint, t);
629 INHERIT (to_watchpoint_addr_within_range, t);
630 INHERIT (to_region_ok_for_hw_watchpoint, t);
631 INHERIT (to_terminal_init, t);
632 INHERIT (to_terminal_inferior, t);
633 INHERIT (to_terminal_ours_for_output, t);
634 INHERIT (to_terminal_ours, t);
635 INHERIT (to_terminal_save_ours, t);
636 INHERIT (to_terminal_info, t);
637 /* Do not inherit to_kill. */
638 INHERIT (to_load, t);
639 INHERIT (to_lookup_symbol, t);
640 /* Do not inherit to_create_inferior. */
641 INHERIT (to_post_startup_inferior, t);
642 INHERIT (to_acknowledge_created_inferior, t);
643 INHERIT (to_insert_fork_catchpoint, t);
644 INHERIT (to_remove_fork_catchpoint, t);
645 INHERIT (to_insert_vfork_catchpoint, t);
646 INHERIT (to_remove_vfork_catchpoint, t);
647 /* Do not inherit to_follow_fork. */
648 INHERIT (to_insert_exec_catchpoint, t);
649 INHERIT (to_remove_exec_catchpoint, t);
650 INHERIT (to_has_exited, t);
651 /* Do not inherit to_mourn_inferior. */
652 INHERIT (to_can_run, t);
653 INHERIT (to_notice_signals, t);
654 /* Do not inherit to_thread_alive. */
655 /* Do not inherit to_find_new_threads. */
656 /* Do not inherit to_pid_to_str. */
657 INHERIT (to_extra_thread_info, t);
658 INHERIT (to_stop, t);
659 /* Do not inherit to_xfer_partial. */
660 INHERIT (to_rcmd, t);
661 INHERIT (to_pid_to_exec_file, t);
662 INHERIT (to_log_command, t);
663 INHERIT (to_stratum, t);
664 /* Do not inherit to_has_all_memory. */
665 /* Do not inherit to_has_memory. */
666 /* Do not inherit to_has_stack. */
667 /* Do not inherit to_has_registers. */
668 /* Do not inherit to_has_execution. */
669 INHERIT (to_has_thread_control, t);
670 INHERIT (to_can_async_p, t);
671 INHERIT (to_is_async_p, t);
672 INHERIT (to_async, t);
673 INHERIT (to_async_mask, t);
674 INHERIT (to_find_memory_regions, t);
675 INHERIT (to_make_corefile_notes, t);
676 /* Do not inherit to_get_thread_local_address. */
677 INHERIT (to_can_execute_reverse, t);
678 INHERIT (to_thread_architecture, t);
679 /* Do not inherit to_read_description. */
680 INHERIT (to_get_ada_task_ptid, t);
681 /* Do not inherit to_search_memory. */
682 INHERIT (to_supports_multi_process, t);
683 INHERIT (to_magic, t);
684 /* Do not inherit to_memory_map. */
685 /* Do not inherit to_flash_erase. */
686 /* Do not inherit to_flash_done. */
687 }
688 #undef INHERIT
689
690 /* Clean up a target struct so it no longer has any zero pointers in
691 it. Some entries are defaulted to a method that prints an error,
692 others are hard-wired to a standard recursive default. */
693
694 #define de_fault(field, value) \
695 if (!current_target.field) \
696 current_target.field = value
697
698 de_fault (to_open,
699 (void (*) (char *, int))
700 tcomplain);
701 de_fault (to_close,
702 (void (*) (int))
703 target_ignore);
704 de_fault (to_post_attach,
705 (void (*) (int))
706 target_ignore);
707 de_fault (to_prepare_to_store,
708 (void (*) (struct regcache *))
709 noprocess);
710 de_fault (deprecated_xfer_memory,
711 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
712 nomemory);
713 de_fault (to_files_info,
714 (void (*) (struct target_ops *))
715 target_ignore);
716 de_fault (to_insert_breakpoint,
717 memory_insert_breakpoint);
718 de_fault (to_remove_breakpoint,
719 memory_remove_breakpoint);
720 de_fault (to_can_use_hw_breakpoint,
721 (int (*) (int, int, int))
722 return_zero);
723 de_fault (to_insert_hw_breakpoint,
724 (int (*) (struct gdbarch *, struct bp_target_info *))
725 return_minus_one);
726 de_fault (to_remove_hw_breakpoint,
727 (int (*) (struct gdbarch *, struct bp_target_info *))
728 return_minus_one);
729 de_fault (to_insert_watchpoint,
730 (int (*) (CORE_ADDR, int, int))
731 return_minus_one);
732 de_fault (to_remove_watchpoint,
733 (int (*) (CORE_ADDR, int, int))
734 return_minus_one);
735 de_fault (to_stopped_by_watchpoint,
736 (int (*) (void))
737 return_zero);
738 de_fault (to_stopped_data_address,
739 (int (*) (struct target_ops *, CORE_ADDR *))
740 return_zero);
741 de_fault (to_watchpoint_addr_within_range,
742 default_watchpoint_addr_within_range);
743 de_fault (to_region_ok_for_hw_watchpoint,
744 default_region_ok_for_hw_watchpoint);
745 de_fault (to_terminal_init,
746 (void (*) (void))
747 target_ignore);
748 de_fault (to_terminal_inferior,
749 (void (*) (void))
750 target_ignore);
751 de_fault (to_terminal_ours_for_output,
752 (void (*) (void))
753 target_ignore);
754 de_fault (to_terminal_ours,
755 (void (*) (void))
756 target_ignore);
757 de_fault (to_terminal_save_ours,
758 (void (*) (void))
759 target_ignore);
760 de_fault (to_terminal_info,
761 default_terminal_info);
762 de_fault (to_load,
763 (void (*) (char *, int))
764 tcomplain);
765 de_fault (to_lookup_symbol,
766 (int (*) (char *, CORE_ADDR *))
767 nosymbol);
768 de_fault (to_post_startup_inferior,
769 (void (*) (ptid_t))
770 target_ignore);
771 de_fault (to_acknowledge_created_inferior,
772 (void (*) (int))
773 target_ignore);
774 de_fault (to_insert_fork_catchpoint,
775 (void (*) (int))
776 tcomplain);
777 de_fault (to_remove_fork_catchpoint,
778 (int (*) (int))
779 tcomplain);
780 de_fault (to_insert_vfork_catchpoint,
781 (void (*) (int))
782 tcomplain);
783 de_fault (to_remove_vfork_catchpoint,
784 (int (*) (int))
785 tcomplain);
786 de_fault (to_insert_exec_catchpoint,
787 (void (*) (int))
788 tcomplain);
789 de_fault (to_remove_exec_catchpoint,
790 (int (*) (int))
791 tcomplain);
792 de_fault (to_has_exited,
793 (int (*) (int, int, int *))
794 return_zero);
795 de_fault (to_can_run,
796 return_zero);
797 de_fault (to_notice_signals,
798 (void (*) (ptid_t))
799 target_ignore);
800 de_fault (to_extra_thread_info,
801 (char *(*) (struct thread_info *))
802 return_zero);
803 de_fault (to_stop,
804 (void (*) (ptid_t))
805 target_ignore);
806 current_target.to_xfer_partial = current_xfer_partial;
807 de_fault (to_rcmd,
808 (void (*) (char *, struct ui_file *))
809 tcomplain);
810 de_fault (to_pid_to_exec_file,
811 (char *(*) (int))
812 return_zero);
813 de_fault (to_async,
814 (void (*) (void (*) (enum inferior_event_type, void*), void*))
815 tcomplain);
816 de_fault (to_async_mask,
817 (int (*) (int))
818 return_one);
819 de_fault (to_thread_architecture,
820 default_thread_architecture);
821 current_target.to_read_description = NULL;
822 de_fault (to_get_ada_task_ptid,
823 (ptid_t (*) (long, long))
824 default_get_ada_task_ptid);
825 de_fault (to_supports_multi_process,
826 (int (*) (void))
827 return_zero);
828 #undef de_fault
829
830 /* Finally, position the target-stack beneath the squashed
831 "current_target". That way code looking for a non-inherited
832 target method can quickly and simply find it. */
833 current_target.beneath = target_stack;
834
835 if (targetdebug)
836 setup_target_debug ();
837 }
838
839 /* Push a new target type into the stack of the existing target accessors,
840 possibly superseding some of the existing accessors.
841
842 Result is zero if the pushed target ended up on top of the stack,
843 nonzero if at least one target is on top of it.
844
845 Rather than allow an empty stack, we always have the dummy target at
846 the bottom stratum, so we can call the function vectors without
847 checking them. */
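/* A typical caller is a target's to_open routine; as a sketch (the names
   below are illustrative, not taken from this file):

     static void
     foo_open (char *args, int from_tty)
     {
       ...
       push_target (&foo_ops);
     }
*/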
848
849 int
850 push_target (struct target_ops *t)
851 {
852 struct target_ops **cur;
853
854 /* Check magic number. If wrong, it probably means someone changed
855 the struct definition, but not all the places that initialize one. */
856 if (t->to_magic != OPS_MAGIC)
857 {
858 fprintf_unfiltered (gdb_stderr,
859 "Magic number of %s target struct wrong\n",
860 t->to_shortname);
861 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
862 }
863
864 /* Find the proper stratum to install this target in. */
865 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
866 {
867 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
868 break;
869 }
870
871 /* If there are already targets at this stratum, remove them. */
872 /* FIXME: cagney/2003-10-15: I think this should be popping all
873 targets to CUR, and not just those at this stratum level. */
874 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
875 {
876 /* There's already something at this stratum level. Close it,
877 and un-hook it from the stack. */
878 struct target_ops *tmp = (*cur);
879 (*cur) = (*cur)->beneath;
880 tmp->beneath = NULL;
881 target_close (tmp, 0);
882 }
883
884 /* We have removed all targets in our stratum, now add the new one. */
885 t->beneath = (*cur);
886 (*cur) = t;
887
888 update_current_target ();
889
890 /* Not on top? */
891 return (t != target_stack);
892 }
893
894 /* Remove a target_ops vector from the stack, wherever it may be.
895 Return how many times it was removed (0 or 1). */
896
897 int
898 unpush_target (struct target_ops *t)
899 {
900 struct target_ops **cur;
901 struct target_ops *tmp;
902
903 if (t->to_stratum == dummy_stratum)
904 internal_error (__FILE__, __LINE__,
905 "Attempt to unpush the dummy target");
906
907 /* Look for the specified target. Note that we assume that a target
908 can only occur once in the target stack. */
909
910 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
911 {
912 if ((*cur) == t)
913 break;
914 }
915
916 if ((*cur) == NULL)
917 return 0; /* Didn't find target_ops, quit now */
918
919 /* NOTE: cagney/2003-12-06: In '94 the close call was made
920 unconditional by moving it to before the above check that the
921 target was in the target stack (something about "Change the way
922 pushing and popping of targets work to support target overlays
923 and inheritance"). This doesn't make much sense - only open
924 targets should be closed. */
925 target_close (t, 0);
926
927 /* Unchain the target */
928 tmp = (*cur);
929 (*cur) = (*cur)->beneath;
930 tmp->beneath = NULL;
931
932 update_current_target ();
933
934 return 1;
935 }
936
937 void
938 pop_target (void)
939 {
940 target_close (target_stack, 0); /* Let it clean up */
941 if (unpush_target (target_stack) == 1)
942 return;
943
944 fprintf_unfiltered (gdb_stderr,
945 "pop_target couldn't find target %s\n",
946 current_target.to_shortname);
947 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
948 }
949
950 void
951 pop_all_targets_above (enum strata above_stratum, int quitting)
952 {
953 while ((int) (current_target.to_stratum) > (int) above_stratum)
954 {
955 target_close (target_stack, quitting);
956 if (!unpush_target (target_stack))
957 {
958 fprintf_unfiltered (gdb_stderr,
959 "pop_all_targets couldn't find target %s\n",
960 target_stack->to_shortname);
961 internal_error (__FILE__, __LINE__,
962 _("failed internal consistency check"));
963 break;
964 }
965 }
966 }
967
968 void
969 pop_all_targets (int quitting)
970 {
971 pop_all_targets_above (dummy_stratum, quitting);
972 }
973
974 /* Using the objfile specified in OBJFILE, find the address for the
975 current thread's thread-local storage with offset OFFSET. */
976 CORE_ADDR
977 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
978 {
979 volatile CORE_ADDR addr = 0;
980 struct target_ops *target;
981
982 for (target = current_target.beneath;
983 target != NULL;
984 target = target->beneath)
985 {
986 if (target->to_get_thread_local_address != NULL)
987 break;
988 }
989
990 if (target != NULL
991 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
992 {
993 ptid_t ptid = inferior_ptid;
994 volatile struct gdb_exception ex;
995
996 TRY_CATCH (ex, RETURN_MASK_ALL)
997 {
998 CORE_ADDR lm_addr;
999
1000 /* Fetch the load module address for this objfile. */
1001 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1002 objfile);
1003 /* If it's 0, throw the appropriate exception. */
1004 if (lm_addr == 0)
1005 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1006 _("TLS load module not found"));
1007
1008 addr = target->to_get_thread_local_address (target, ptid, lm_addr, offset);
1009 }
1010 /* If an error occurred, print TLS related messages here. Otherwise,
1011 throw the error to some higher catcher. */
1012 if (ex.reason < 0)
1013 {
1014 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1015
1016 switch (ex.error)
1017 {
1018 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1019 error (_("Cannot find thread-local variables in this thread library."));
1020 break;
1021 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1022 if (objfile_is_library)
1023 error (_("Cannot find shared library `%s' in dynamic"
1024 " linker's load module list"), objfile->name);
1025 else
1026 error (_("Cannot find executable file `%s' in dynamic"
1027 " linker's load module list"), objfile->name);
1028 break;
1029 case TLS_NOT_ALLOCATED_YET_ERROR:
1030 if (objfile_is_library)
1031 error (_("The inferior has not yet allocated storage for"
1032 " thread-local variables in\n"
1033 "the shared library `%s'\n"
1034 "for %s"),
1035 objfile->name, target_pid_to_str (ptid));
1036 else
1037 error (_("The inferior has not yet allocated storage for"
1038 " thread-local variables in\n"
1039 "the executable `%s'\n"
1040 "for %s"),
1041 objfile->name, target_pid_to_str (ptid));
1042 break;
1043 case TLS_GENERIC_ERROR:
1044 if (objfile_is_library)
1045 error (_("Cannot find thread-local storage for %s, "
1046 "shared library %s:\n%s"),
1047 target_pid_to_str (ptid),
1048 objfile->name, ex.message);
1049 else
1050 error (_("Cannot find thread-local storage for %s, "
1051 "executable file %s:\n%s"),
1052 target_pid_to_str (ptid),
1053 objfile->name, ex.message);
1054 break;
1055 default:
1056 throw_exception (ex);
1057 break;
1058 }
1059 }
1060 }
1061 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1062 TLS is an ABI-specific thing. But we don't do that yet. */
1063 else
1064 error (_("Cannot find thread-local variables on this target"));
1065
1066 return addr;
1067 }
1068
1069 #undef MIN
1070 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1071
1072 /* target_read_string -- read a null terminated string, up to LEN bytes,
1073 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1074 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1075 is responsible for freeing it. Return the number of bytes successfully
1076 read. */
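/* Illustrative use (a sketch, not from this file):

     char *str;
     int errcode;
     int nbytes = target_read_string (memaddr, &str, 200, &errcode);

     ... use up to NBYTES bytes of STR, then xfree (str) ...

   Note that *STRING is set even when an error is returned, so the
   caller should free it in either case.  */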
1077
1078 int
1079 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1080 {
1081 int tlen, origlen, offset, i;
1082 gdb_byte buf[4];
1083 int errcode = 0;
1084 char *buffer;
1085 int buffer_allocated;
1086 char *bufptr;
1087 unsigned int nbytes_read = 0;
1088
1089 gdb_assert (string);
1090
1091 /* Small for testing. */
1092 buffer_allocated = 4;
1093 buffer = xmalloc (buffer_allocated);
1094 bufptr = buffer;
1095
1096 origlen = len;
1097
1098 while (len > 0)
1099 {
1100 tlen = MIN (len, 4 - (memaddr & 3));
1101 offset = memaddr & 3;
1102
1103 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1104 if (errcode != 0)
1105 {
1106 /* The transfer request might have crossed the boundary to an
1107 unallocated region of memory. Retry the transfer, requesting
1108 a single byte. */
1109 tlen = 1;
1110 offset = 0;
1111 errcode = target_read_memory (memaddr, buf, 1);
1112 if (errcode != 0)
1113 goto done;
1114 }
1115
1116 if (bufptr - buffer + tlen > buffer_allocated)
1117 {
1118 unsigned int bytes;
1119 bytes = bufptr - buffer;
1120 buffer_allocated *= 2;
1121 buffer = xrealloc (buffer, buffer_allocated);
1122 bufptr = buffer + bytes;
1123 }
1124
1125 for (i = 0; i < tlen; i++)
1126 {
1127 *bufptr++ = buf[i + offset];
1128 if (buf[i + offset] == '\000')
1129 {
1130 nbytes_read += i + 1;
1131 goto done;
1132 }
1133 }
1134
1135 memaddr += tlen;
1136 len -= tlen;
1137 nbytes_read += tlen;
1138 }
1139 done:
1140 *string = buffer;
1141 if (errnop != NULL)
1142 *errnop = errcode;
1143 return nbytes_read;
1144 }
1145
1146 struct target_section_table *
1147 target_get_section_table (struct target_ops *target)
1148 {
1149 struct target_ops *t;
1150
1151 if (targetdebug)
1152 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1153
1154 for (t = target; t != NULL; t = t->beneath)
1155 if (t->to_get_section_table != NULL)
1156 return (*t->to_get_section_table) (t);
1157
1158 return NULL;
1159 }
1160
1161 /* Find a section containing ADDR. */
1162
1163 struct target_section *
1164 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1165 {
1166 struct target_section_table *table = target_get_section_table (target);
1167 struct target_section *secp;
1168
1169 if (table == NULL)
1170 return NULL;
1171
1172 for (secp = table->sections; secp < table->sections_end; secp++)
1173 {
1174 if (addr >= secp->addr && addr < secp->endaddr)
1175 return secp;
1176 }
1177 return NULL;
1178 }
1179
1180 /* Perform a partial memory transfer. The arguments and return
1181 value are just as for target_xfer_partial. */
1182
1183 static LONGEST
1184 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1185 void *readbuf, const void *writebuf, ULONGEST memaddr,
1186 LONGEST len)
1187 {
1188 LONGEST res;
1189 int reg_len;
1190 struct mem_region *region;
1191 struct inferior *inf;
1192
1193 /* Zero length requests are ok and require no work. */
1194 if (len == 0)
1195 return 0;
1196
1197 /* For accesses to unmapped overlay sections, read directly from
1198 files. Must do this first, as MEMADDR may need adjustment. */
1199 if (readbuf != NULL && overlay_debugging)
1200 {
1201 struct obj_section *section = find_pc_overlay (memaddr);
1202 if (pc_in_unmapped_range (memaddr, section))
1203 {
1204 struct target_section_table *table
1205 = target_get_section_table (ops);
1206 const char *section_name = section->the_bfd_section->name;
1207 memaddr = overlay_mapped_address (memaddr, section);
1208 return section_table_xfer_memory_partial (readbuf, writebuf,
1209 memaddr, len,
1210 table->sections,
1211 table->sections_end,
1212 section_name);
1213 }
1214 }
1215
1216 /* Try the executable files, if "trust-readonly-sections" is set. */
1217 if (readbuf != NULL && trust_readonly)
1218 {
1219 struct target_section *secp;
1220 struct target_section_table *table;
1221
1222 secp = target_section_by_addr (ops, memaddr);
1223 if (secp != NULL
1224 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1225 & SEC_READONLY))
1226 {
1227 table = target_get_section_table (ops);
1228 return section_table_xfer_memory_partial (readbuf, writebuf,
1229 memaddr, len,
1230 table->sections,
1231 table->sections_end,
1232 NULL);
1233 }
1234 }
1235
1236 /* Try GDB's internal data cache. */
1237 region = lookup_mem_region (memaddr);
1238 /* region->hi == 0 means there's no upper bound. */
1239 if (memaddr + len < region->hi || region->hi == 0)
1240 reg_len = len;
1241 else
1242 reg_len = region->hi - memaddr;
1243
1244 switch (region->attrib.mode)
1245 {
1246 case MEM_RO:
1247 if (writebuf != NULL)
1248 return -1;
1249 break;
1250
1251 case MEM_WO:
1252 if (readbuf != NULL)
1253 return -1;
1254 break;
1255
1256 case MEM_FLASH:
1257 /* We only support writing to flash during "load" for now. */
1258 if (writebuf != NULL)
1259 error (_("Writing to flash memory forbidden in this context"));
1260 break;
1261
1262 case MEM_NONE:
1263 return -1;
1264 }
1265
1266 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1267
1268 if (inf != NULL
1269 && (region->attrib.cache
1270 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1271 {
1272 if (readbuf != NULL)
1273 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1274 reg_len, 0);
1275 else
1276 /* FIXME drow/2006-08-09: If we're going to preserve const
1277 correctness dcache_xfer_memory should take readbuf and
1278 writebuf. */
1279 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1280 (void *) writebuf,
1281 reg_len, 1);
1282 if (res <= 0)
1283 return -1;
1284 else
1285 {
1286 if (readbuf && !show_memory_breakpoints)
1287 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1288 return res;
1289 }
1290 }
1291
1292 /* Make sure the cache gets updated no matter what - if we are writing
1293 to the stack, even if this write is not tagged as such, we still need
1294 to update the cache. */
1295
1296 if (inf != NULL
1297 && readbuf == NULL
1298 && !region->attrib.cache
1299 && stack_cache_enabled_p
1300 && object != TARGET_OBJECT_STACK_MEMORY)
1301 {
1302 dcache_update (target_dcache, memaddr, (void *) writebuf, reg_len);
1303 }
1304
1305 /* If none of those methods found the memory we wanted, fall back
1306 to a target partial transfer. Normally a single call to
1307 to_xfer_partial is enough; if it doesn't recognize an object
1308 it will call the to_xfer_partial of the next target down.
1309 But for memory this won't do. Memory is the only target
1310 object which can be read from more than one valid target.
1311 A core file, for instance, could have some of memory but
1312 delegate other bits to the target below it. So, we must
1313 manually try all targets. */
1314
1315 do
1316 {
1317 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1318 readbuf, writebuf, memaddr, reg_len);
1319 if (res > 0)
1320 break;
1321
1322 /* We want to continue past core files to executables, but not
1323 past a running target's memory. */
1324 if (ops->to_has_all_memory (ops))
1325 break;
1326
1327 ops = ops->beneath;
1328 }
1329 while (ops != NULL);
1330
1331 if (readbuf && !show_memory_breakpoints)
1332 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1333
1334 /* If we still haven't got anything, return the last error. We
1335 give up. */
1336 return res;
1337 }
1338
1339 static void
1340 restore_show_memory_breakpoints (void *arg)
1341 {
1342 show_memory_breakpoints = (uintptr_t) arg;
1343 }
1344
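/* Record the current value of show_memory_breakpoints, install SHOW in
   its place, and return a cleanup that restores the old value.  Typical
   use (a sketch):

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     ... memory reads now return the breakpoint instructions that GDB
     inserted, rather than the shadowed original contents ...

     do_cleanups (old_chain);  */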
1345 struct cleanup *
1346 make_show_memory_breakpoints_cleanup (int show)
1347 {
1348 int current = show_memory_breakpoints;
1349 show_memory_breakpoints = show;
1350
1351 return make_cleanup (restore_show_memory_breakpoints,
1352 (void *) (uintptr_t) current);
1353 }
1354
1355 static LONGEST
1356 target_xfer_partial (struct target_ops *ops,
1357 enum target_object object, const char *annex,
1358 void *readbuf, const void *writebuf,
1359 ULONGEST offset, LONGEST len)
1360 {
1361 LONGEST retval;
1362
1363 gdb_assert (ops->to_xfer_partial != NULL);
1364
1365 /* If this is a memory transfer, let the memory-specific code
1366 have a look at it instead. Memory transfers are more
1367 complicated. */
1368 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1369 retval = memory_xfer_partial (ops, object, readbuf,
1370 writebuf, offset, len);
1371 else
1372 {
1373 enum target_object raw_object = object;
1374
1375 /* If this is a raw memory transfer, request the normal
1376 memory object from other layers. */
1377 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1378 raw_object = TARGET_OBJECT_MEMORY;
1379
1380 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1381 writebuf, offset, len);
1382 }
1383
1384 if (targetdebug)
1385 {
1386 const unsigned char *myaddr = NULL;
1387
1388 fprintf_unfiltered (gdb_stdlog,
1389 "%s:target_xfer_partial (%d, %s, %s, %s, %s, %s) = %s",
1390 ops->to_shortname,
1391 (int) object,
1392 (annex ? annex : "(null)"),
1393 host_address_to_string (readbuf),
1394 host_address_to_string (writebuf),
1395 core_addr_to_string_nz (offset),
1396 plongest (len), plongest (retval));
1397
1398 if (readbuf)
1399 myaddr = readbuf;
1400 if (writebuf)
1401 myaddr = writebuf;
1402 if (retval > 0 && myaddr != NULL)
1403 {
1404 int i;
1405
1406 fputs_unfiltered (", bytes =", gdb_stdlog);
1407 for (i = 0; i < retval; i++)
1408 {
1409 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1410 {
1411 if (targetdebug < 2 && i > 0)
1412 {
1413 fprintf_unfiltered (gdb_stdlog, " ...");
1414 break;
1415 }
1416 fprintf_unfiltered (gdb_stdlog, "\n");
1417 }
1418
1419 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1420 }
1421 }
1422
1423 fputc_unfiltered ('\n', gdb_stdlog);
1424 }
1425 return retval;
1426 }
1427
1428 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1429 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1430 if any error occurs.
1431
1432 If an error occurs, no guarantee is made about the contents of the data at
1433 MYADDR. In particular, the caller should not depend upon partial reads
1434 filling the buffer with good data. There is no way for the caller to know
1435 how much good data might have been transferred anyway. Callers that can
1436 deal with partial reads should call target_read (which will retry until
1437 it makes no progress, and then return how much was transferred). */
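/* For example (illustrative only):

     gdb_byte buf[4];

     if (target_read_memory (memaddr, buf, sizeof buf) != 0)
       memory_error (EIO, memaddr);
*/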
1438
1439 int
1440 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1441 {
1442 /* Dispatch to the topmost target, not the flattened current_target.
1443 Memory accesses check target->to_has_(all_)memory, and the
1444 flattened target doesn't inherit those. */
1445 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1446 myaddr, memaddr, len) == len)
1447 return 0;
1448 else
1449 return EIO;
1450 }
1451
1452 /* Like target_read_memory, but specify explicitly that this is a read from
1453 the target's stack. This may trigger different cache behavior. */
1454
1455 int
1456 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1457 {
1458 /* Dispatch to the topmost target, not the flattened current_target.
1459 Memory accesses check target->to_has_(all_)memory, and the
1460 flattened target doesn't inherit those. */
1461
1462 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1463 myaddr, memaddr, len) == len)
1464 return 0;
1465 else
1466 return EIO;
1467 }
1468
1469 int
1470 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1471 {
1472 /* Dispatch to the topmost target, not the flattened current_target.
1473 Memory accesses check target->to_has_(all_)memory, and the
1474 flattened target doesn't inherit those. */
1475 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1476 myaddr, memaddr, len) == len)
1477 return 0;
1478 else
1479 return EIO;
1480 }
1481
1482 /* Fetch the target's memory map. */
1483
1484 VEC(mem_region_s) *
1485 target_memory_map (void)
1486 {
1487 VEC(mem_region_s) *result;
1488 struct mem_region *last_one, *this_one;
1489 int ix;
1490 struct target_ops *t;
1491
1492 if (targetdebug)
1493 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1494
1495 for (t = current_target.beneath; t != NULL; t = t->beneath)
1496 if (t->to_memory_map != NULL)
1497 break;
1498
1499 if (t == NULL)
1500 return NULL;
1501
1502 result = t->to_memory_map (t);
1503 if (result == NULL)
1504 return NULL;
1505
1506 qsort (VEC_address (mem_region_s, result),
1507 VEC_length (mem_region_s, result),
1508 sizeof (struct mem_region), mem_region_cmp);
1509
1510 /* Check that regions do not overlap. Simultaneously assign
1511 a numbering for the "mem" commands to use to refer to
1512 each region. */
1513 last_one = NULL;
1514 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1515 {
1516 this_one->number = ix;
1517
1518 if (last_one && last_one->hi > this_one->lo)
1519 {
1520 warning (_("Overlapping regions in memory map: ignoring"));
1521 VEC_free (mem_region_s, result);
1522 return NULL;
1523 }
1524 last_one = this_one;
1525 }
1526
1527 return result;
1528 }
1529
1530 void
1531 target_flash_erase (ULONGEST address, LONGEST length)
1532 {
1533 struct target_ops *t;
1534
1535 for (t = current_target.beneath; t != NULL; t = t->beneath)
1536 if (t->to_flash_erase != NULL)
1537 {
1538 if (targetdebug)
1539 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1540 hex_string (address), phex (length, 0));
1541 t->to_flash_erase (t, address, length);
1542 return;
1543 }
1544
1545 tcomplain ();
1546 }
1547
1548 void
1549 target_flash_done (void)
1550 {
1551 struct target_ops *t;
1552
1553 for (t = current_target.beneath; t != NULL; t = t->beneath)
1554 if (t->to_flash_done != NULL)
1555 {
1556 if (targetdebug)
1557 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1558 t->to_flash_done (t);
1559 return;
1560 }
1561
1562 tcomplain ();
1563 }
1564
1565 static void
1566 show_trust_readonly (struct ui_file *file, int from_tty,
1567 struct cmd_list_element *c, const char *value)
1568 {
1569 fprintf_filtered (file, _("\
1570 Mode for reading from readonly sections is %s.\n"),
1571 value);
1572 }
1573
1574 /* More generic transfers. */
1575
1576 static LONGEST
1577 default_xfer_partial (struct target_ops *ops, enum target_object object,
1578 const char *annex, gdb_byte *readbuf,
1579 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1580 {
1581 if (object == TARGET_OBJECT_MEMORY
1582 && ops->deprecated_xfer_memory != NULL)
1583 /* If available, fall back to the target's
1584 "deprecated_xfer_memory" method. */
1585 {
1586 int xfered = -1;
1587 errno = 0;
1588 if (writebuf != NULL)
1589 {
1590 void *buffer = xmalloc (len);
1591 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1592 memcpy (buffer, writebuf, len);
1593 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1594 1/*write*/, NULL, ops);
1595 do_cleanups (cleanup);
1596 }
1597 if (readbuf != NULL)
1598 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1599 0/*read*/, NULL, ops);
1600 if (xfered > 0)
1601 return xfered;
1602 else if (xfered == 0 && errno == 0)
1603 /* "deprecated_xfer_memory" uses 0, cross checked against
1604 ERRNO as one indication of an error. */
1605 return 0;
1606 else
1607 return -1;
1608 }
1609 else if (ops->beneath != NULL)
1610 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1611 readbuf, writebuf, offset, len);
1612 else
1613 return -1;
1614 }
1615
1616 /* The xfer_partial handler for the topmost target. Unlike the default,
1617 it does not need to handle memory specially; it just passes all
1618 requests down the stack. */
1619
1620 static LONGEST
1621 current_xfer_partial (struct target_ops *ops, enum target_object object,
1622 const char *annex, gdb_byte *readbuf,
1623 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1624 {
1625 if (ops->beneath != NULL)
1626 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1627 readbuf, writebuf, offset, len);
1628 else
1629 return -1;
1630 }
1631
1632 /* Target vector read/write partial wrapper functions.
1633
1634 NOTE: cagney/2003-10-21: I wonder if having "to_xfer_partial
1635 (inbuf, outbuf)", instead of separate read/write methods, would make
1636 life easier. */
1637
1638 static LONGEST
1639 target_read_partial (struct target_ops *ops,
1640 enum target_object object,
1641 const char *annex, gdb_byte *buf,
1642 ULONGEST offset, LONGEST len)
1643 {
1644 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1645 }
1646
1647 static LONGEST
1648 target_write_partial (struct target_ops *ops,
1649 enum target_object object,
1650 const char *annex, const gdb_byte *buf,
1651 ULONGEST offset, LONGEST len)
1652 {
1653 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1654 }
1655
1656 /* Wrappers to perform the full transfer. */
1657 LONGEST
1658 target_read (struct target_ops *ops,
1659 enum target_object object,
1660 const char *annex, gdb_byte *buf,
1661 ULONGEST offset, LONGEST len)
1662 {
1663 LONGEST xfered = 0;
1664 while (xfered < len)
1665 {
1666 LONGEST xfer = target_read_partial (ops, object, annex,
1667 (gdb_byte *) buf + xfered,
1668 offset + xfered, len - xfered);
1669 /* Call an observer, notifying them of the xfer progress? */
1670 if (xfer == 0)
1671 return xfered;
1672 if (xfer < 0)
1673 return -1;
1674 xfered += xfer;
1675 QUIT;
1676 }
1677 return len;
1678 }
1679
1680 LONGEST
1681 target_read_until_error (struct target_ops *ops,
1682 enum target_object object,
1683 const char *annex, gdb_byte *buf,
1684 ULONGEST offset, LONGEST len)
1685 {
1686 LONGEST xfered = 0;
1687 while (xfered < len)
1688 {
1689 LONGEST xfer = target_read_partial (ops, object, annex,
1690 (gdb_byte *) buf + xfered,
1691 offset + xfered, len - xfered);
1692 /* Call an observer, notifying them of the xfer progress? */
1693 if (xfer == 0)
1694 return xfered;
1695 if (xfer < 0)
1696 {
1697 /* We've got an error. Try to read in smaller blocks. */
1698 ULONGEST start = offset + xfered;
1699 ULONGEST remaining = len - xfered;
1700 ULONGEST half;
1701
1702 /* If an attempt was made to read a random memory address,
1703 it's likely that the very first byte is not accessible.
1704 Try reading the first byte, to avoid doing log N tries
1705 below. */
1706 xfer = target_read_partial (ops, object, annex,
1707 (gdb_byte *) buf + xfered, start, 1);
1708 if (xfer <= 0)
1709 return xfered;
1710 start += 1;
1711 remaining -= 1;
1712 half = remaining/2;
1713
1714 while (half > 0)
1715 {
1716 xfer = target_read_partial (ops, object, annex,
1717 (gdb_byte *) buf + xfered,
1718 start, half);
1719 if (xfer == 0)
1720 return xfered;
1721 if (xfer < 0)
1722 {
1723 remaining = half;
1724 }
1725 else
1726 {
1727 /* We have successfully read the first half. So, the
1728 error must be in the second half. Adjust start and
1729 remaining to point at the second half. */
1730 xfered += xfer;
1731 start += xfer;
1732 remaining -= xfer;
1733 }
1734 half = remaining/2;
1735 }
1736
1737 return xfered;
1738 }
1739 xfered += xfer;
1740 QUIT;
1741 }
1742 return len;
1743 }
1744
1745
1746 /* An alternative to target_write with progress callbacks. */
1747
1748 LONGEST
1749 target_write_with_progress (struct target_ops *ops,
1750 enum target_object object,
1751 const char *annex, const gdb_byte *buf,
1752 ULONGEST offset, LONGEST len,
1753 void (*progress) (ULONGEST, void *), void *baton)
1754 {
1755 LONGEST xfered = 0;
1756
1757 /* Give the progress callback a chance to set up. */
1758 if (progress)
1759 (*progress) (0, baton);
1760
1761 while (xfered < len)
1762 {
1763 LONGEST xfer = target_write_partial (ops, object, annex,
1764 (gdb_byte *) buf + xfered,
1765 offset + xfered, len - xfered);
1766
1767 if (xfer == 0)
1768 return xfered;
1769 if (xfer < 0)
1770 return -1;
1771
1772 if (progress)
1773 (*progress) (xfer, baton);
1774
1775 xfered += xfer;
1776 QUIT;
1777 }
1778 return len;
1779 }
1780
1781 LONGEST
1782 target_write (struct target_ops *ops,
1783 enum target_object object,
1784 const char *annex, const gdb_byte *buf,
1785 ULONGEST offset, LONGEST len)
1786 {
1787 return target_write_with_progress (ops, object, annex, buf, offset, len,
1788 NULL, NULL);
1789 }
1790
1791 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1792 the size of the transferred data. PADDING additional bytes are
1793 available in *BUF_P. This is a helper function for
1794 target_read_alloc; see the declaration of that function for more
1795 information. */
1796
1797 static LONGEST
1798 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1799 const char *annex, gdb_byte **buf_p, int padding)
1800 {
1801 size_t buf_alloc, buf_pos;
1802 gdb_byte *buf;
1803 LONGEST n;
1804
1805 /* This function does not have a length parameter; it reads the
1806 entire OBJECT. Also, it doesn't support objects fetched partly
1807 from one target and partly from another (in a different stratum,
1808 e.g. a core file and an executable). Both reasons make it
1809 unsuitable for reading memory. */
1810 gdb_assert (object != TARGET_OBJECT_MEMORY);
1811
1812 /* Start by reading up to 4K at a time. The target will throttle
1813 this number down if necessary. */
1814 buf_alloc = 4096;
1815 buf = xmalloc (buf_alloc);
1816 buf_pos = 0;
1817 while (1)
1818 {
1819 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1820 buf_pos, buf_alloc - buf_pos - padding);
1821 if (n < 0)
1822 {
1823 /* An error occurred. */
1824 xfree (buf);
1825 return -1;
1826 }
1827 else if (n == 0)
1828 {
1829 /* Read all there was. */
1830 if (buf_pos == 0)
1831 xfree (buf);
1832 else
1833 *buf_p = buf;
1834 return buf_pos;
1835 }
1836
1837 buf_pos += n;
1838
1839 /* If the buffer is filling up, expand it. */
1840 if (buf_alloc < buf_pos * 2)
1841 {
1842 buf_alloc *= 2;
1843 buf = xrealloc (buf, buf_alloc);
1844 }
1845
1846 QUIT;
1847 }
1848 }
1849
1850 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1851 the size of the transferred data. See the declaration in "target.h"
1852 for more information about the return value. */
1853
1854 LONGEST
1855 target_read_alloc (struct target_ops *ops, enum target_object object,
1856 const char *annex, gdb_byte **buf_p)
1857 {
1858 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1859 }
1860
1861 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1862 returned as a string, allocated using xmalloc. If an error occurs
1863 or the transfer is unsupported, NULL is returned. Empty objects
1864 are returned as allocated but empty strings. A warning is issued
1865 if the result contains any embedded NUL bytes. */
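/* Illustrative use (a sketch; the object and annex are just examples):

     char *text = target_read_stralloc (&current_target,
                                        TARGET_OBJECT_LIBRARIES, NULL);

     if (text != NULL)
       {
         ... parse TEXT ...
         xfree (text);
       }
*/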
1866
1867 char *
1868 target_read_stralloc (struct target_ops *ops, enum target_object object,
1869 const char *annex)
1870 {
1871 gdb_byte *buffer;
1872 LONGEST transferred;
1873
1874 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1875
1876 if (transferred < 0)
1877 return NULL;
1878
1879 if (transferred == 0)
1880 return xstrdup ("");
1881
1882 buffer[transferred] = 0;
1883 if (strlen ((char *) buffer) < transferred)
1884 warning (_("target object %d, annex %s, "
1885 "contained unexpected null characters"),
1886 (int) object, annex ? annex : "(none)");
1887
1888 return (char *) buffer;
1889 }
1890
1891 /* Memory transfer methods. */
1892
1893 void
1894 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1895 LONGEST len)
1896 {
1897 /* This method is used to read from an alternate, non-current
1898 target. This read must bypass the overlay support (as symbols
1899 don't match this target), and GDB's internal cache (wrong cache
1900 for this target). */
1901 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1902 != len)
1903 memory_error (EIO, addr);
1904 }
1905
1906 ULONGEST
1907 get_target_memory_unsigned (struct target_ops *ops,
1908 CORE_ADDR addr, int len, enum bfd_endian byte_order)
1909 {
1910 gdb_byte buf[sizeof (ULONGEST)];
1911
1912 gdb_assert (len <= sizeof (buf));
1913 get_target_memory (ops, addr, buf, len);
1914 return extract_unsigned_integer (buf, len, byte_order);
1915 }
1916
1917 static void
1918 target_info (char *args, int from_tty)
1919 {
1920 struct target_ops *t;
1921 int has_all_mem = 0;
1922
1923 if (symfile_objfile != NULL)
1924 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
1925
1926 for (t = target_stack; t != NULL; t = t->beneath)
1927 {
1928 if (!(*t->to_has_memory) (t))
1929 continue;
1930
1931 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1932 continue;
1933 if (has_all_mem)
1934 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
1935 printf_unfiltered ("%s:\n", t->to_longname);
1936 (t->to_files_info) (t);
1937 has_all_mem = (*t->to_has_all_memory) (t);
1938 }
1939 }
1940
1941 /* This function is called before any new inferior is created, e.g.
1942 by running a program, attaching, or connecting to a target.
1943 It cleans up any state from previous invocations which might
1944 change between runs. This is a subset of what target_preopen
1945 resets (things which might change between targets). */
1946
1947 void
1948 target_pre_inferior (int from_tty)
1949 {
1950 /* Clear out solib state. Otherwise the solib state of the previous
1951 inferior might have survived and is entirely wrong for the new
1952 target. This has been observed on GNU/Linux using glibc 2.3. How
1953 to reproduce:
1954
1955 bash$ ./foo&
1956 [1] 4711
1957 bash$ ./foo&
1958 [1] 4712
1959 bash$ gdb ./foo
1960 [...]
1961 (gdb) attach 4711
1962 (gdb) detach
1963 (gdb) attach 4712
1964 Cannot access memory at address 0xdeadbeef
1965 */
1966
1967 /* In some OSs, the shared library list is the same/global/shared
1968 across inferiors. If code is shared between processes, so are
1969 memory regions and features. */
1970 if (!gdbarch_has_global_solist (target_gdbarch))
1971 {
1972 no_shared_libraries (NULL, from_tty);
1973
1974 invalidate_target_mem_regions ();
1975
1976 target_clear_description ();
1977 }
1978 }
1979
1980 /* Callback for iterate_over_inferiors. Gets rid of the given
1981 inferior. */
1982
1983 static int
1984 dispose_inferior (struct inferior *inf, void *args)
1985 {
1986 struct thread_info *thread;
1987
1988 thread = any_thread_of_process (inf->pid);
1989 if (thread)
1990 {
1991 switch_to_thread (thread->ptid);
1992
1993 /* Core inferiors actually should be detached, not killed. */
1994 if (target_has_execution)
1995 target_kill ();
1996 else
1997 target_detach (NULL, 0);
1998 }
1999
2000 return 0;
2001 }
2002
2003 /* This is to be called by the open routine before it does
2004 anything. */
2005
2006 void
2007 target_preopen (int from_tty)
2008 {
2009 dont_repeat ();
2010
2011 if (have_inferiors ())
2012 {
2013 if (!from_tty
2014 || !have_live_inferiors ()
2015 || query (_("A program is being debugged already. Kill it? ")))
2016 iterate_over_inferiors (dispose_inferior, NULL);
2017 else
2018 error (_("Program not killed."));
2019 }
2020
2021 /* Calling target_kill may remove the target from the stack. But if
2022 it doesn't (which seems like a win for UDI), remove it now. */
2023 /* Leave the exec target, though. The user may be switching from a
2024 live process to a core of the same program. */
2025 pop_all_targets_above (file_stratum, 0);
2026
2027 target_pre_inferior (from_tty);
2028 }
2029
2030 /* Detach a target after doing deferred register stores. */
2031
2032 void
2033 target_detach (char *args, int from_tty)
2034 {
2035 struct target_ops* t;
2036
2037 if (gdbarch_has_global_breakpoints (target_gdbarch))
2038 /* Don't remove global breakpoints here. They're removed on
2039 disconnection from the target. */
2040 ;
2041 else
2042     /* If we're in breakpoints-always-inserted mode, we have to remove
2043 them before detaching. */
2044 remove_breakpoints ();
2045
2046 for (t = current_target.beneath; t != NULL; t = t->beneath)
2047 {
2048 if (t->to_detach != NULL)
2049 {
2050 t->to_detach (t, args, from_tty);
2051 if (targetdebug)
2052 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2053 args, from_tty);
2054 return;
2055 }
2056 }
2057
2058 internal_error (__FILE__, __LINE__, "could not find a target to detach");
2059 }
2060
2061 void
2062 target_disconnect (char *args, int from_tty)
2063 {
2064 struct target_ops *t;
2065
2066 /* If we're in breakpoints-always-inserted mode or if breakpoints
2067 are global across processes, we have to remove them before
2068 disconnecting. */
2069 remove_breakpoints ();
2070
2071 for (t = current_target.beneath; t != NULL; t = t->beneath)
2072 if (t->to_disconnect != NULL)
2073 {
2074 if (targetdebug)
2075 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2076 args, from_tty);
2077 t->to_disconnect (t, args, from_tty);
2078 return;
2079 }
2080
2081 tcomplain ();
2082 }
2083
2084 ptid_t
2085 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2086 {
2087 struct target_ops *t;
2088
2089 for (t = current_target.beneath; t != NULL; t = t->beneath)
2090 {
2091 if (t->to_wait != NULL)
2092 {
2093 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2094
2095 if (targetdebug)
2096 {
2097 char *status_string;
2098
2099 status_string = target_waitstatus_to_string (status);
2100 fprintf_unfiltered (gdb_stdlog,
2101 "target_wait (%d, status) = %d, %s\n",
2102 PIDGET (ptid), PIDGET (retval),
2103 status_string);
2104 xfree (status_string);
2105 }
2106
2107 return retval;
2108 }
2109 }
2110
2111 noprocess ();
2112 }
2113
2114 char *
2115 target_pid_to_str (ptid_t ptid)
2116 {
2117 struct target_ops *t;
2118
2119 for (t = current_target.beneath; t != NULL; t = t->beneath)
2120 {
2121 if (t->to_pid_to_str != NULL)
2122 return (*t->to_pid_to_str) (t, ptid);
2123 }
2124
2125 return normal_pid_to_str (ptid);
2126 }
2127
2128 void
2129 target_resume (ptid_t ptid, int step, enum target_signal signal)
2130 {
2131 struct target_ops *t;
2132
2133 target_dcache_invalidate ();
2134
2135 for (t = current_target.beneath; t != NULL; t = t->beneath)
2136 {
2137 if (t->to_resume != NULL)
2138 {
2139 t->to_resume (t, ptid, step, signal);
2140 if (targetdebug)
2141 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2142 PIDGET (ptid),
2143 step ? "step" : "continue",
2144 target_signal_to_name (signal));
2145
2146 set_executing (ptid, 1);
2147 set_running (ptid, 1);
2148 clear_inline_frame_state (ptid);
2149 return;
2150 }
2151 }
2152
2153 noprocess ();
2154 }
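/* Illustrative sketch, not part of the original source: the minimal
   resume/wait cycle a caller drives through the two wrappers above.
   Event decoding and error handling are omitted.  */
#if 0 /* example only */
static void
example_single_step (ptid_t ptid)
{
  struct target_waitstatus ws;

  target_resume (ptid, 1 /* step */, TARGET_SIGNAL_0);
  target_wait (minus_one_ptid, &ws, 0);
}
#endif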
2155 /* Look through the list of possible targets for a target that can
2156 follow forks. */
2157
2158 int
2159 target_follow_fork (int follow_child)
2160 {
2161 struct target_ops *t;
2162
2163 for (t = current_target.beneath; t != NULL; t = t->beneath)
2164 {
2165 if (t->to_follow_fork != NULL)
2166 {
2167 int retval = t->to_follow_fork (t, follow_child);
2168 if (targetdebug)
2169 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2170 follow_child, retval);
2171 return retval;
2172 }
2173 }
2174
2175 /* Some target returned a fork event, but did not know how to follow it. */
2176 internal_error (__FILE__, __LINE__,
2177 "could not find a target to follow fork");
2178 }
2179
2180 void
2181 target_mourn_inferior (void)
2182 {
2183 struct target_ops *t;
2184 for (t = current_target.beneath; t != NULL; t = t->beneath)
2185 {
2186 if (t->to_mourn_inferior != NULL)
2187 {
2188 t->to_mourn_inferior (t);
2189 if (targetdebug)
2190 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2191
2192 /* We no longer need to keep handles on any of the object files.
2193 Make sure to release them to avoid unnecessarily locking any
2194 of them while we're not actually debugging. */
2195 bfd_cache_close_all ();
2196
2197 return;
2198 }
2199 }
2200
2201 internal_error (__FILE__, __LINE__,
2202 "could not find a target to follow mourn inferiour");
2203 }
2204
2205 /* Look for a target which can describe architectural features, starting
2206 from TARGET. If we find one, return its description. */
2207
2208 const struct target_desc *
2209 target_read_description (struct target_ops *target)
2210 {
2211 struct target_ops *t;
2212
2213 for (t = target; t != NULL; t = t->beneath)
2214 if (t->to_read_description != NULL)
2215 {
2216 const struct target_desc *tdesc;
2217
2218 tdesc = t->to_read_description (t);
2219 if (tdesc)
2220 return tdesc;
2221 }
2222
2223 return NULL;
2224 }
2225
2226 /* The default implementation of to_search_memory.
2227 This implements a basic search of memory, reading target memory and
2228    performing the search here (as opposed to performing the search on the
2229 target side with, for example, gdbserver). */
2230
2231 int
2232 simple_search_memory (struct target_ops *ops,
2233 CORE_ADDR start_addr, ULONGEST search_space_len,
2234 const gdb_byte *pattern, ULONGEST pattern_len,
2235 CORE_ADDR *found_addrp)
2236 {
2237 /* NOTE: also defined in find.c testcase. */
2238 #define SEARCH_CHUNK_SIZE 16000
2239 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2240 /* Buffer to hold memory contents for searching. */
2241 gdb_byte *search_buf;
2242 unsigned search_buf_size;
2243 struct cleanup *old_cleanups;
2244
2245 search_buf_size = chunk_size + pattern_len - 1;
2246
2247 /* No point in trying to allocate a buffer larger than the search space. */
2248 if (search_space_len < search_buf_size)
2249 search_buf_size = search_space_len;
2250
2251 search_buf = malloc (search_buf_size);
2252 if (search_buf == NULL)
2253 error (_("Unable to allocate memory to perform the search."));
2254 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2255
2256 /* Prime the search buffer. */
2257
2258 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2259 search_buf, start_addr, search_buf_size) != search_buf_size)
2260 {
2261 warning (_("Unable to access target memory at %s, halting search."),
2262 hex_string (start_addr));
2263 do_cleanups (old_cleanups);
2264 return -1;
2265 }
2266
2267 /* Perform the search.
2268
2269 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2270 When we've scanned N bytes we copy the trailing bytes to the start and
2271 read in another N bytes. */
2272
2273 while (search_space_len >= pattern_len)
2274 {
2275 gdb_byte *found_ptr;
2276 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2277
2278 found_ptr = memmem (search_buf, nr_search_bytes,
2279 pattern, pattern_len);
2280
2281 if (found_ptr != NULL)
2282 {
2283 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2284 *found_addrp = found_addr;
2285 do_cleanups (old_cleanups);
2286 return 1;
2287 }
2288
2289 /* Not found in this chunk, skip to next chunk. */
2290
2291 /* Don't let search_space_len wrap here, it's unsigned. */
2292 if (search_space_len >= chunk_size)
2293 search_space_len -= chunk_size;
2294 else
2295 search_space_len = 0;
2296
2297 if (search_space_len >= pattern_len)
2298 {
2299 unsigned keep_len = search_buf_size - chunk_size;
2300 CORE_ADDR read_addr = start_addr + keep_len;
2301 int nr_to_read;
2302
2303 /* Copy the trailing part of the previous iteration to the front
2304 of the buffer for the next iteration. */
2305 gdb_assert (keep_len == pattern_len - 1);
2306 memcpy (search_buf, search_buf + chunk_size, keep_len);
2307
2308 nr_to_read = min (search_space_len - keep_len, chunk_size);
2309
2310 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2311 search_buf + keep_len, read_addr,
2312 nr_to_read) != nr_to_read)
2313 {
2314 warning (_("Unable to access target memory at %s, halting search."),
2315 hex_string (read_addr));
2316 do_cleanups (old_cleanups);
2317 return -1;
2318 }
2319
2320 start_addr += chunk_size;
2321 }
2322 }
2323
2324 /* Not found. */
2325
2326 do_cleanups (old_cleanups);
2327 return 0;
2328 }
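/* Illustrative sketch, not part of the original source: searching the
   first 4KB at START for a short byte pattern with the fallback above.
   The keep_len == pattern_len - 1 overlap copied between chunks is what
   lets a match straddle a chunk boundary.  */
#if 0 /* example only */
static int
example_find_marker (struct target_ops *ops, CORE_ADDR start,
		     CORE_ADDR *found)
{
  static const gdb_byte pattern[] = { 'G', 'D', 'B' };

  return simple_search_memory (ops, start, 4096, pattern,
			       sizeof (pattern), found);
}
#endif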
2329
2330 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2331 sequence of bytes in PATTERN with length PATTERN_LEN.
2332
2333 The result is 1 if found, 0 if not found, and -1 if there was an error
2334 requiring halting of the search (e.g. memory read error).
2335 If the pattern is found the address is recorded in FOUND_ADDRP. */
2336
2337 int
2338 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2339 const gdb_byte *pattern, ULONGEST pattern_len,
2340 CORE_ADDR *found_addrp)
2341 {
2342 struct target_ops *t;
2343 int found;
2344
2345 /* We don't use INHERIT to set current_target.to_search_memory,
2346 so we have to scan the target stack and handle targetdebug
2347 ourselves. */
2348
2349 if (targetdebug)
2350 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2351 hex_string (start_addr));
2352
2353 for (t = current_target.beneath; t != NULL; t = t->beneath)
2354 if (t->to_search_memory != NULL)
2355 break;
2356
2357 if (t != NULL)
2358 {
2359 found = t->to_search_memory (t, start_addr, search_space_len,
2360 pattern, pattern_len, found_addrp);
2361 }
2362 else
2363 {
2364 /* If a special version of to_search_memory isn't available, use the
2365 simple version. */
2366 found = simple_search_memory (current_target.beneath,
2367 start_addr, search_space_len,
2368 pattern, pattern_len, found_addrp);
2369 }
2370
2371 if (targetdebug)
2372 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2373
2374 return found;
2375 }
2376
2377 /* Look through the currently pushed targets. If none of them will
2378 be able to restart the currently running process, issue an error
2379 message. */
2380
2381 void
2382 target_require_runnable (void)
2383 {
2384 struct target_ops *t;
2385
2386 for (t = target_stack; t != NULL; t = t->beneath)
2387 {
2388 /* If this target knows how to create a new program, then
2389 assume we will still be able to after killing the current
2390 one. Either killing and mourning will not pop T, or else
2391 find_default_run_target will find it again. */
2392 if (t->to_create_inferior != NULL)
2393 return;
2394
2395 /* Do not worry about thread_stratum targets that can not
2396 create inferiors. Assume they will be pushed again if
2397 necessary, and continue to the process_stratum. */
2398 if (t->to_stratum == thread_stratum
2399 || t->to_stratum == arch_stratum)
2400 continue;
2401
2402 error (_("\
2403 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2404 t->to_shortname);
2405 }
2406
2407 /* This function is only called if the target is running. In that
2408 case there should have been a process_stratum target and it
2409 should either know how to create inferiors, or not... */
2410 internal_error (__FILE__, __LINE__, "No targets found");
2411 }
2412
2413 /* Look through the list of possible targets for a target that can
2414 execute a run or attach command without any other data. This is
2415 used to locate the default process stratum.
2416
2417 If DO_MESG is not NULL, the result is always valid (error() is
2418 called for errors); else, return NULL on error. */
2419
2420 static struct target_ops *
2421 find_default_run_target (char *do_mesg)
2422 {
2423 struct target_ops **t;
2424 struct target_ops *runable = NULL;
2425 int count;
2426
2427 count = 0;
2428
2429 for (t = target_structs; t < target_structs + target_struct_size;
2430 ++t)
2431 {
2432 if ((*t)->to_can_run && target_can_run (*t))
2433 {
2434 runable = *t;
2435 ++count;
2436 }
2437 }
2438
2439 if (count != 1)
2440 {
2441 if (do_mesg)
2442 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2443 else
2444 return NULL;
2445 }
2446
2447 return runable;
2448 }
2449
2450 void
2451 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2452 {
2453 struct target_ops *t;
2454
2455 t = find_default_run_target ("attach");
2456 (t->to_attach) (t, args, from_tty);
2457 return;
2458 }
2459
2460 void
2461 find_default_create_inferior (struct target_ops *ops,
2462 char *exec_file, char *allargs, char **env,
2463 int from_tty)
2464 {
2465 struct target_ops *t;
2466
2467 t = find_default_run_target ("run");
2468 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2469 return;
2470 }
2471
2472 static int
2473 find_default_can_async_p (void)
2474 {
2475 struct target_ops *t;
2476
2477 /* This may be called before the target is pushed on the stack;
2478 look for the default process stratum. If there's none, gdb isn't
2479 configured with a native debugger, and target remote isn't
2480 connected yet. */
2481 t = find_default_run_target (NULL);
2482 if (t && t->to_can_async_p)
2483 return (t->to_can_async_p) ();
2484 return 0;
2485 }
2486
2487 static int
2488 find_default_is_async_p (void)
2489 {
2490 struct target_ops *t;
2491
2492 /* This may be called before the target is pushed on the stack;
2493 look for the default process stratum. If there's none, gdb isn't
2494 configured with a native debugger, and target remote isn't
2495 connected yet. */
2496 t = find_default_run_target (NULL);
2497 if (t && t->to_is_async_p)
2498 return (t->to_is_async_p) ();
2499 return 0;
2500 }
2501
2502 static int
2503 find_default_supports_non_stop (void)
2504 {
2505 struct target_ops *t;
2506
2507 t = find_default_run_target (NULL);
2508 if (t && t->to_supports_non_stop)
2509 return (t->to_supports_non_stop) ();
2510 return 0;
2511 }
2512
2513 int
2514 target_supports_non_stop (void)
2515 {
2516 struct target_ops *t;
2517 for (t = &current_target; t != NULL; t = t->beneath)
2518 if (t->to_supports_non_stop)
2519 return t->to_supports_non_stop ();
2520
2521 return 0;
2522 }
2523
2524
2525 char *
2526 target_get_osdata (const char *type)
2527 {
2528 char *document;
2529 struct target_ops *t;
2530
2531 /* If we're already connected to something that can get us OS
2532 related data, use it. Otherwise, try using the native
2533 target. */
2534 if (current_target.to_stratum >= process_stratum)
2535 t = current_target.beneath;
2536 else
2537 t = find_default_run_target ("get OS data");
2538
2539 if (!t)
2540 return NULL;
2541
2542 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2543 }
2544
2545 static int
2546 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2547 {
2548 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2549 }
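/* Illustrative check, not part of the original source: with the default
   above, a target whose pointers are 64 bits wide (gdbarch_ptr_bit == 64,
   TARGET_CHAR_BIT == 8) accepts hardware watchpoints on regions of at
   most 64 / 8 == 8 bytes.  */
#if 0 /* example only */
static void
example_check_default_region (void)
{
  gdb_assert (default_region_ok_for_hw_watchpoint (0x1000, 8));
  gdb_assert (!default_region_ok_for_hw_watchpoint (0x1000, 16));
}
#endif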
2550
2551 static int
2552 default_watchpoint_addr_within_range (struct target_ops *target,
2553 CORE_ADDR addr,
2554 CORE_ADDR start, int length)
2555 {
2556 return addr >= start && addr < start + length;
2557 }
2558
2559 static struct gdbarch *
2560 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2561 {
2562 return target_gdbarch;
2563 }
2564
2565 static int
2566 return_zero (void)
2567 {
2568 return 0;
2569 }
2570
2571 static int
2572 return_one (void)
2573 {
2574 return 1;
2575 }
2576
2577 static int
2578 return_minus_one (void)
2579 {
2580 return -1;
2581 }
2582
2583 /* Find a single runnable target in the stack and return it. If for
2584 some reason there is more than one, return NULL. */
2585
2586 struct target_ops *
2587 find_run_target (void)
2588 {
2589 struct target_ops **t;
2590 struct target_ops *runable = NULL;
2591 int count;
2592
2593 count = 0;
2594
2595 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2596 {
2597 if ((*t)->to_can_run && target_can_run (*t))
2598 {
2599 runable = *t;
2600 ++count;
2601 }
2602 }
2603
2604 return (count == 1 ? runable : NULL);
2605 }
2606
2607 /* Find a single core_stratum target in the list of targets and return it.
2608 If for some reason there is more than one, return NULL. */
2609
2610 struct target_ops *
2611 find_core_target (void)
2612 {
2613 struct target_ops **t;
2614 struct target_ops *runable = NULL;
2615 int count;
2616
2617 count = 0;
2618
2619 for (t = target_structs; t < target_structs + target_struct_size;
2620 ++t)
2621 {
2622 if ((*t)->to_stratum == core_stratum)
2623 {
2624 runable = *t;
2625 ++count;
2626 }
2627 }
2628
2629 return (count == 1 ? runable : NULL);
2630 }
2631
2632 /*
2633 * Find the next target down the stack from the specified target.
2634 */
2635
2636 struct target_ops *
2637 find_target_beneath (struct target_ops *t)
2638 {
2639 return t->beneath;
2640 }
2641
2642 \f
2643 /* The inferior process has died. Long live the inferior! */
2644
2645 void
2646 generic_mourn_inferior (void)
2647 {
2648 ptid_t ptid;
2649
2650 ptid = inferior_ptid;
2651 inferior_ptid = null_ptid;
2652
2653 if (!ptid_equal (ptid, null_ptid))
2654 {
2655 int pid = ptid_get_pid (ptid);
2656 delete_inferior (pid);
2657 }
2658
2659 breakpoint_init_inferior (inf_exited);
2660 registers_changed ();
2661
2662 reopen_exec_file ();
2663 reinit_frame_cache ();
2664
2665 if (deprecated_detach_hook)
2666 deprecated_detach_hook ();
2667 }
2668 \f
2669 /* Helper function for child_wait and the derivatives of child_wait.
2670 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2671 translation of that in OURSTATUS. */
2672 void
2673 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2674 {
2675 if (WIFEXITED (hoststatus))
2676 {
2677 ourstatus->kind = TARGET_WAITKIND_EXITED;
2678 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2679 }
2680 else if (!WIFSTOPPED (hoststatus))
2681 {
2682 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2683 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2684 }
2685 else
2686 {
2687 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2688 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2689 }
2690 }
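/* Illustrative sketch, not part of the original source: how a native
   target's wait loop might feed a raw wait status through
   store_waitstatus.  PID selection and error handling are omitted, and
   the waitpid call assumes a POSIX host.  */
#if 0 /* example only */
static void
example_translate_wait (int pid, struct target_waitstatus *ourstatus)
{
  int hoststatus;

  waitpid (pid, &hoststatus, 0);
  store_waitstatus (ourstatus, hoststatus);
}
#endif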
2691 \f
2692 /* Convert a normal process ID to a string. Returns the string in a
2693 static buffer. */
2694
2695 char *
2696 normal_pid_to_str (ptid_t ptid)
2697 {
2698 static char buf[32];
2699
2700 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2701 return buf;
2702 }
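/* Illustrative sketch, not part of the original source: because the
   buffer above is static, a caller that needs to keep the string across
   another call must copy it first.  */
#if 0 /* example only */
static char *
example_save_pid_str (ptid_t ptid)
{
  return xstrdup (normal_pid_to_str (ptid));
}
#endif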
2703
2704 static char *
2705 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
2706 {
2707 return normal_pid_to_str (ptid);
2708 }
2709
2710 /* Error-catcher for target_find_memory_regions.  */
2711 static int dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2712 {
2713 error (_("No target."));
2714 return 0;
2715 }
2716
2717 /* Error-catcher for target_make_corefile_notes.  */
2718 static char * dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2719 {
2720 error (_("No target."));
2721 return NULL;
2722 }
2723
2724 /* Set up the handful of non-empty slots needed by the dummy target
2725 vector. */
2726
2727 static void
2728 init_dummy_target (void)
2729 {
2730 dummy_target.to_shortname = "None";
2731 dummy_target.to_longname = "None";
2732 dummy_target.to_doc = "";
2733 dummy_target.to_attach = find_default_attach;
2734 dummy_target.to_detach =
2735 (void (*)(struct target_ops *, char *, int))target_ignore;
2736 dummy_target.to_create_inferior = find_default_create_inferior;
2737 dummy_target.to_can_async_p = find_default_can_async_p;
2738 dummy_target.to_is_async_p = find_default_is_async_p;
2739 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2740 dummy_target.to_pid_to_str = dummy_pid_to_str;
2741 dummy_target.to_stratum = dummy_stratum;
2742 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2743 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2744 dummy_target.to_xfer_partial = default_xfer_partial;
2745 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
2746 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
2747 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
2748 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
2749 dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
2750 dummy_target.to_magic = OPS_MAGIC;
2751 }
2752 \f
2753 static void
2754 debug_to_open (char *args, int from_tty)
2755 {
2756 debug_target.to_open (args, from_tty);
2757
2758 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2759 }
2760
2761 void
2762 target_close (struct target_ops *targ, int quitting)
2763 {
2764 if (targ->to_xclose != NULL)
2765 targ->to_xclose (targ, quitting);
2766 else if (targ->to_close != NULL)
2767 targ->to_close (quitting);
2768
2769 if (targetdebug)
2770 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2771 }
2772
2773 void
2774 target_attach (char *args, int from_tty)
2775 {
2776 struct target_ops *t;
2777 for (t = current_target.beneath; t != NULL; t = t->beneath)
2778 {
2779 if (t->to_attach != NULL)
2780 {
2781 t->to_attach (t, args, from_tty);
2782 if (targetdebug)
2783 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
2784 args, from_tty);
2785 return;
2786 }
2787 }
2788
2789 internal_error (__FILE__, __LINE__,
2790 "could not find a target to attach");
2791 }
2792
2793 int
2794 target_thread_alive (ptid_t ptid)
2795 {
2796 struct target_ops *t;
2797 for (t = current_target.beneath; t != NULL; t = t->beneath)
2798 {
2799 if (t->to_thread_alive != NULL)
2800 {
2801 int retval;
2802
2803 retval = t->to_thread_alive (t, ptid);
2804 if (targetdebug)
2805 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
2806 PIDGET (ptid), retval);
2807
2808 return retval;
2809 }
2810 }
2811
2812 return 0;
2813 }
2814
2815 void
2816 target_find_new_threads (void)
2817 {
2818 struct target_ops *t;
2819 for (t = current_target.beneath; t != NULL; t = t->beneath)
2820 {
2821 if (t->to_find_new_threads != NULL)
2822 {
2823 t->to_find_new_threads (t);
2824 if (targetdebug)
2825 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
2826
2827 return;
2828 }
2829 }
2830 }
2831
2832 static void
2833 debug_to_post_attach (int pid)
2834 {
2835 debug_target.to_post_attach (pid);
2836
2837 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
2838 }
2839
2840 /* Return a pretty printed form of target_waitstatus.
2841 Space for the result is malloc'd, caller must free. */
2842
2843 char *
2844 target_waitstatus_to_string (const struct target_waitstatus *ws)
2845 {
2846 const char *kind_str = "status->kind = ";
2847
2848 switch (ws->kind)
2849 {
2850 case TARGET_WAITKIND_EXITED:
2851 return xstrprintf ("%sexited, status = %d",
2852 kind_str, ws->value.integer);
2853 case TARGET_WAITKIND_STOPPED:
2854 return xstrprintf ("%sstopped, signal = %s",
2855 kind_str, target_signal_to_name (ws->value.sig));
2856 case TARGET_WAITKIND_SIGNALLED:
2857 return xstrprintf ("%ssignalled, signal = %s",
2858 kind_str, target_signal_to_name (ws->value.sig));
2859 case TARGET_WAITKIND_LOADED:
2860 return xstrprintf ("%sloaded", kind_str);
2861 case TARGET_WAITKIND_FORKED:
2862 return xstrprintf ("%sforked", kind_str);
2863 case TARGET_WAITKIND_VFORKED:
2864 return xstrprintf ("%svforked", kind_str);
2865 case TARGET_WAITKIND_EXECD:
2866 return xstrprintf ("%sexecd", kind_str);
2867 case TARGET_WAITKIND_SYSCALL_ENTRY:
2868 return xstrprintf ("%ssyscall-entry", kind_str);
2869 case TARGET_WAITKIND_SYSCALL_RETURN:
2870 return xstrprintf ("%ssyscall-return", kind_str);
2871 case TARGET_WAITKIND_SPURIOUS:
2872 return xstrprintf ("%sspurious", kind_str);
2873 case TARGET_WAITKIND_IGNORE:
2874 return xstrprintf ("%signore", kind_str);
2875 case TARGET_WAITKIND_NO_HISTORY:
2876 return xstrprintf ("%sno-history", kind_str);
2877 default:
2878 return xstrprintf ("%sunknown???", kind_str);
2879 }
2880 }
2881
2882 static void
2883 debug_print_register (const char * func,
2884 struct regcache *regcache, int regno)
2885 {
2886 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2887 fprintf_unfiltered (gdb_stdlog, "%s ", func);
2888 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
2889 && gdbarch_register_name (gdbarch, regno) != NULL
2890 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
2891 fprintf_unfiltered (gdb_stdlog, "(%s)",
2892 gdbarch_register_name (gdbarch, regno));
2893 else
2894 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
2895 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
2896 {
2897 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2898 int i, size = register_size (gdbarch, regno);
2899 unsigned char buf[MAX_REGISTER_SIZE];
2900 regcache_raw_collect (regcache, regno, buf);
2901 fprintf_unfiltered (gdb_stdlog, " = ");
2902 for (i = 0; i < size; i++)
2903 {
2904 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
2905 }
2906 if (size <= sizeof (LONGEST))
2907 {
2908 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
2909 fprintf_unfiltered (gdb_stdlog, " %s %s",
2910 core_addr_to_string_nz (val), plongest (val));
2911 }
2912 }
2913 fprintf_unfiltered (gdb_stdlog, "\n");
2914 }
2915
2916 void
2917 target_fetch_registers (struct regcache *regcache, int regno)
2918 {
2919 struct target_ops *t;
2920 for (t = current_target.beneath; t != NULL; t = t->beneath)
2921 {
2922 if (t->to_fetch_registers != NULL)
2923 {
2924 t->to_fetch_registers (t, regcache, regno);
2925 if (targetdebug)
2926 debug_print_register ("target_fetch_registers", regcache, regno);
2927 return;
2928 }
2929 }
2930 }
2931
2932 void
2933 target_store_registers (struct regcache *regcache, int regno)
2934 {
2935
2936 struct target_ops *t;
2937 for (t = current_target.beneath; t != NULL; t = t->beneath)
2938 {
2939 if (t->to_store_registers != NULL)
2940 {
2941 t->to_store_registers (t, regcache, regno);
2942 if (targetdebug)
2943 {
2944 debug_print_register ("target_store_registers", regcache, regno);
2945 }
2946 return;
2947 }
2948 }
2949
2950 noprocess ();
2951 }
2952
2953 static void
2954 debug_to_prepare_to_store (struct regcache *regcache)
2955 {
2956 debug_target.to_prepare_to_store (regcache);
2957
2958 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
2959 }
2960
2961 static int
2962 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
2963 int write, struct mem_attrib *attrib,
2964 struct target_ops *target)
2965 {
2966 int retval;
2967
2968 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
2969 attrib, target);
2970
2971 fprintf_unfiltered (gdb_stdlog,
2972 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
2973 paddress (target_gdbarch, memaddr), len,
2974 write ? "write" : "read", retval);
2975
2976 if (retval > 0)
2977 {
2978 int i;
2979
2980 fputs_unfiltered (", bytes =", gdb_stdlog);
2981 for (i = 0; i < retval; i++)
2982 {
2983 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
2984 {
2985 if (targetdebug < 2 && i > 0)
2986 {
2987 fprintf_unfiltered (gdb_stdlog, " ...");
2988 break;
2989 }
2990 fprintf_unfiltered (gdb_stdlog, "\n");
2991 }
2992
2993 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
2994 }
2995 }
2996
2997 fputc_unfiltered ('\n', gdb_stdlog);
2998
2999 return retval;
3000 }
3001
3002 static void
3003 debug_to_files_info (struct target_ops *target)
3004 {
3005 debug_target.to_files_info (target);
3006
3007 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3008 }
3009
3010 static int
3011 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3012 struct bp_target_info *bp_tgt)
3013 {
3014 int retval;
3015
3016 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3017
3018 fprintf_unfiltered (gdb_stdlog,
3019 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
3020 (unsigned long) bp_tgt->placed_address,
3021 (unsigned long) retval);
3022 return retval;
3023 }
3024
3025 static int
3026 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3027 struct bp_target_info *bp_tgt)
3028 {
3029 int retval;
3030
3031 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3032
3033 fprintf_unfiltered (gdb_stdlog,
3034 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
3035 (unsigned long) bp_tgt->placed_address,
3036 (unsigned long) retval);
3037 return retval;
3038 }
3039
3040 static int
3041 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3042 {
3043 int retval;
3044
3045 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3046
3047 fprintf_unfiltered (gdb_stdlog,
3048 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3049 (unsigned long) type,
3050 (unsigned long) cnt,
3051 (unsigned long) from_tty,
3052 (unsigned long) retval);
3053 return retval;
3054 }
3055
3056 static int
3057 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3058 {
3059   int retval;
3060
3061 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3062
3063 fprintf_unfiltered (gdb_stdlog,
3064 "target_region_ok_for_hw_watchpoint (%ld, %ld) = 0x%lx\n",
3065 (unsigned long) addr,
3066 (unsigned long) len,
3067 (unsigned long) retval);
3068 return retval;
3069 }
3070
3071 static int
3072 debug_to_stopped_by_watchpoint (void)
3073 {
3074 int retval;
3075
3076 retval = debug_target.to_stopped_by_watchpoint ();
3077
3078 fprintf_unfiltered (gdb_stdlog,
3079 "target_stopped_by_watchpoint () = %ld\n",
3080 (unsigned long) retval);
3081 return retval;
3082 }
3083
3084 static int
3085 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3086 {
3087 int retval;
3088
3089 retval = debug_target.to_stopped_data_address (target, addr);
3090
3091 fprintf_unfiltered (gdb_stdlog,
3092 "target_stopped_data_address ([0x%lx]) = %ld\n",
3093 (unsigned long)*addr,
3094 (unsigned long)retval);
3095 return retval;
3096 }
3097
3098 static int
3099 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3100 CORE_ADDR addr,
3101 CORE_ADDR start, int length)
3102 {
3103 int retval;
3104
3105 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3106 start, length);
3107
3108 fprintf_filtered (gdb_stdlog,
3109 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
3110 (unsigned long) addr, (unsigned long) start, length,
3111 retval);
3112 return retval;
3113 }
3114
3115 static int
3116 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3117 struct bp_target_info *bp_tgt)
3118 {
3119 int retval;
3120
3121 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3122
3123 fprintf_unfiltered (gdb_stdlog,
3124 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
3125 (unsigned long) bp_tgt->placed_address,
3126 (unsigned long) retval);
3127 return retval;
3128 }
3129
3130 static int
3131 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3132 struct bp_target_info *bp_tgt)
3133 {
3134 int retval;
3135
3136 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3137
3138 fprintf_unfiltered (gdb_stdlog,
3139 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
3140 (unsigned long) bp_tgt->placed_address,
3141 (unsigned long) retval);
3142 return retval;
3143 }
3144
3145 static int
3146 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
3147 {
3148 int retval;
3149
3150 retval = debug_target.to_insert_watchpoint (addr, len, type);
3151
3152 fprintf_unfiltered (gdb_stdlog,
3153 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
3154 (unsigned long) addr, len, type, (unsigned long) retval);
3155 return retval;
3156 }
3157
3158 static int
3159 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
3160 {
3161 int retval;
3162
3163 retval = debug_target.to_remove_watchpoint (addr, len, type);
3164
3165 fprintf_unfiltered (gdb_stdlog,
3166 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
3167 (unsigned long) addr, len, type, (unsigned long) retval);
3168 return retval;
3169 }
3170
3171 static void
3172 debug_to_terminal_init (void)
3173 {
3174 debug_target.to_terminal_init ();
3175
3176 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
3177 }
3178
3179 static void
3180 debug_to_terminal_inferior (void)
3181 {
3182 debug_target.to_terminal_inferior ();
3183
3184 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
3185 }
3186
3187 static void
3188 debug_to_terminal_ours_for_output (void)
3189 {
3190 debug_target.to_terminal_ours_for_output ();
3191
3192 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
3193 }
3194
3195 static void
3196 debug_to_terminal_ours (void)
3197 {
3198 debug_target.to_terminal_ours ();
3199
3200 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
3201 }
3202
3203 static void
3204 debug_to_terminal_save_ours (void)
3205 {
3206 debug_target.to_terminal_save_ours ();
3207
3208 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
3209 }
3210
3211 static void
3212 debug_to_terminal_info (char *arg, int from_tty)
3213 {
3214 debug_target.to_terminal_info (arg, from_tty);
3215
3216 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
3217 from_tty);
3218 }
3219
3220 static void
3221 debug_to_load (char *args, int from_tty)
3222 {
3223 debug_target.to_load (args, from_tty);
3224
3225 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
3226 }
3227
3228 static int
3229 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
3230 {
3231 int retval;
3232
3233 retval = debug_target.to_lookup_symbol (name, addrp);
3234
3235 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
3236
3237 return retval;
3238 }
3239
3240 static void
3241 debug_to_post_startup_inferior (ptid_t ptid)
3242 {
3243 debug_target.to_post_startup_inferior (ptid);
3244
3245 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
3246 PIDGET (ptid));
3247 }
3248
3249 static void
3250 debug_to_acknowledge_created_inferior (int pid)
3251 {
3252 debug_target.to_acknowledge_created_inferior (pid);
3253
3254 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
3255 pid);
3256 }
3257
3258 static void
3259 debug_to_insert_fork_catchpoint (int pid)
3260 {
3261 debug_target.to_insert_fork_catchpoint (pid);
3262
3263 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
3264 pid);
3265 }
3266
3267 static int
3268 debug_to_remove_fork_catchpoint (int pid)
3269 {
3270 int retval;
3271
3272 retval = debug_target.to_remove_fork_catchpoint (pid);
3273
3274 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3275 pid, retval);
3276
3277 return retval;
3278 }
3279
3280 static void
3281 debug_to_insert_vfork_catchpoint (int pid)
3282 {
3283 debug_target.to_insert_vfork_catchpoint (pid);
3284
3285 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
3286 pid);
3287 }
3288
3289 static int
3290 debug_to_remove_vfork_catchpoint (int pid)
3291 {
3292 int retval;
3293
3294 retval = debug_target.to_remove_vfork_catchpoint (pid);
3295
3296 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3297 pid, retval);
3298
3299 return retval;
3300 }
3301
3302 static void
3303 debug_to_insert_exec_catchpoint (int pid)
3304 {
3305 debug_target.to_insert_exec_catchpoint (pid);
3306
3307 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
3308 pid);
3309 }
3310
3311 static int
3312 debug_to_remove_exec_catchpoint (int pid)
3313 {
3314 int retval;
3315
3316 retval = debug_target.to_remove_exec_catchpoint (pid);
3317
3318 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3319 pid, retval);
3320
3321 return retval;
3322 }
3323
3324 static int
3325 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3326 {
3327 int has_exited;
3328
3329 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3330
3331 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3332 pid, wait_status, *exit_status, has_exited);
3333
3334 return has_exited;
3335 }
3336
3337 static int
3338 debug_to_can_run (void)
3339 {
3340 int retval;
3341
3342 retval = debug_target.to_can_run ();
3343
3344 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3345
3346 return retval;
3347 }
3348
3349 static void
3350 debug_to_notice_signals (ptid_t ptid)
3351 {
3352 debug_target.to_notice_signals (ptid);
3353
3354 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3355 PIDGET (ptid));
3356 }
3357
3358 static struct gdbarch *
3359 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3360 {
3361 struct gdbarch *retval;
3362
3363 retval = debug_target.to_thread_architecture (ops, ptid);
3364
3365 fprintf_unfiltered (gdb_stdlog, "target_thread_architecture (%s) = %p [%s]\n",
3366 target_pid_to_str (ptid), retval,
3367 gdbarch_bfd_arch_info (retval)->printable_name);
3368 return retval;
3369 }
3370
3371 static void
3372 debug_to_stop (ptid_t ptid)
3373 {
3374 debug_target.to_stop (ptid);
3375
3376 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3377 target_pid_to_str (ptid));
3378 }
3379
3380 static void
3381 debug_to_rcmd (char *command,
3382 struct ui_file *outbuf)
3383 {
3384 debug_target.to_rcmd (command, outbuf);
3385 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3386 }
3387
3388 static char *
3389 debug_to_pid_to_exec_file (int pid)
3390 {
3391 char *exec_file;
3392
3393 exec_file = debug_target.to_pid_to_exec_file (pid);
3394
3395 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3396 pid, exec_file);
3397
3398 return exec_file;
3399 }
3400
3401 static void
3402 setup_target_debug (void)
3403 {
3404 memcpy (&debug_target, &current_target, sizeof debug_target);
3405
3406 current_target.to_open = debug_to_open;
3407 current_target.to_post_attach = debug_to_post_attach;
3408 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3409 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3410 current_target.to_files_info = debug_to_files_info;
3411 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3412 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3413 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3414 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3415 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3416 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3417 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3418 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3419 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3420 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3421 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3422 current_target.to_terminal_init = debug_to_terminal_init;
3423 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3424 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3425 current_target.to_terminal_ours = debug_to_terminal_ours;
3426 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3427 current_target.to_terminal_info = debug_to_terminal_info;
3428 current_target.to_load = debug_to_load;
3429 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3430 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3431 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3432 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3433 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3434 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3435 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3436 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3437 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3438 current_target.to_has_exited = debug_to_has_exited;
3439 current_target.to_can_run = debug_to_can_run;
3440 current_target.to_notice_signals = debug_to_notice_signals;
3441 current_target.to_stop = debug_to_stop;
3442 current_target.to_rcmd = debug_to_rcmd;
3443 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3444 current_target.to_thread_architecture = debug_to_thread_architecture;
3445 }
3446 \f
3447
3448 static char targ_desc[] =
3449 "Names of targets and files being debugged.\n\
3450 Shows the entire stack of targets currently in use (including the exec-file,\n\
3451 core-file, and process, if any), as well as the symbol file name.";
3452
3453 static void
3454 do_monitor_command (char *cmd,
3455 int from_tty)
3456 {
3457 if ((current_target.to_rcmd
3458 == (void (*) (char *, struct ui_file *)) tcomplain)
3459 || (current_target.to_rcmd == debug_to_rcmd
3460 && (debug_target.to_rcmd
3461 == (void (*) (char *, struct ui_file *)) tcomplain)))
3462 error (_("\"monitor\" command not supported by this target."));
3463 target_rcmd (cmd, gdb_stdtarg);
3464 }
3465
3466 /* Print the name of each layer of our target stack.  */
3467
3468 static void
3469 maintenance_print_target_stack (char *cmd, int from_tty)
3470 {
3471 struct target_ops *t;
3472
3473 printf_filtered (_("The current target stack is:\n"));
3474
3475 for (t = target_stack; t != NULL; t = t->beneath)
3476 {
3477 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3478 }
3479 }
3480
3481 /* Controls if async mode is permitted. */
3482 int target_async_permitted = 0;
3483
3484 /* The set command writes to this variable. If the inferior is
3485    executing, target_async_permitted is *not* updated.  */
3486 static int target_async_permitted_1 = 0;
3487
3488 static void
3489 set_maintenance_target_async_permitted (char *args, int from_tty,
3490 struct cmd_list_element *c)
3491 {
3492 if (have_live_inferiors ())
3493 {
3494 target_async_permitted_1 = target_async_permitted;
3495 error (_("Cannot change this setting while the inferior is running."));
3496 }
3497
3498 target_async_permitted = target_async_permitted_1;
3499 }
3500
3501 static void
3502 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3503 struct cmd_list_element *c,
3504 const char *value)
3505 {
3506 fprintf_filtered (file, _("\
3507 Controlling the inferior in asynchronous mode is %s.\n"), value);
3508 }
3509
3510 void
3511 initialize_targets (void)
3512 {
3513 init_dummy_target ();
3514 push_target (&dummy_target);
3515
3516 add_info ("target", target_info, targ_desc);
3517 add_info ("files", target_info, targ_desc);
3518
3519 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3520 Set target debugging."), _("\
3521 Show target debugging."), _("\
3522 When non-zero, target debugging is enabled. Higher numbers are more\n\
3523 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3524 command."),
3525 NULL,
3526 show_targetdebug,
3527 &setdebuglist, &showdebuglist);
3528
3529 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3530 &trust_readonly, _("\
3531 Set mode for reading from readonly sections."), _("\
3532 Show mode for reading from readonly sections."), _("\
3533 When this mode is on, memory reads from readonly sections (such as .text)\n\
3534 will be read from the object file instead of from the target. This will\n\
3535 result in significant performance improvement for remote targets."),
3536 NULL,
3537 show_trust_readonly,
3538 &setlist, &showlist);
3539
3540 add_com ("monitor", class_obscure, do_monitor_command,
3541 _("Send a command to the remote monitor (remote targets only)."));
3542
3543 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3544 _("Print the name of each layer of the internal target stack."),
3545 &maintenanceprintlist);
3546
3547 add_setshow_boolean_cmd ("target-async", no_class,
3548 &target_async_permitted_1, _("\
3549 Set whether gdb controls the inferior in asynchronous mode."), _("\
3550 Show whether gdb controls the inferior in asynchronous mode."), _("\
3551 Tells gdb whether to control the inferior in asynchronous mode."),
3552 set_maintenance_target_async_permitted,
3553 show_maintenance_target_async_permitted,
3554 &setlist,
3555 &showlist);
3556
3557 add_setshow_boolean_cmd ("stack-cache", class_support,
3558 &stack_cache_enabled_p, _("\
3559 Set cache use for stack access."), _("\
3560 Show cache use for stack access."), _("\
3561 When on, use the data cache for all stack access, regardless of any\n\
3562 configured memory regions. This improves remote performance significantly.\n\
3563 By default, caching for stack access is on."),
3564 set_stack_cache_enabled_p,
3565 show_stack_cache_enabled_p,
3566 &setlist, &showlist);
3567
3568 target_dcache = dcache_init ();
3569 }