1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46 #include "tracepoint.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static int nosymbol (char *, CORE_ADDR *);
58
59 static void tcomplain (void) ATTRIBUTE_NORETURN;
60
61 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
62
63 static int return_zero (void);
64
65 static int return_one (void);
66
67 static int return_minus_one (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static LONGEST default_xfer_partial (struct target_ops *ops,
76 enum target_object object,
77 const char *annex, gdb_byte *readbuf,
78 const gdb_byte *writebuf,
79 ULONGEST offset, LONGEST len);
80
81 static LONGEST current_xfer_partial (struct target_ops *ops,
82 enum target_object object,
83 const char *annex, gdb_byte *readbuf,
84 const gdb_byte *writebuf,
85 ULONGEST offset, LONGEST len);
86
87 static LONGEST target_xfer_partial (struct target_ops *ops,
88 enum target_object object,
89 const char *annex,
90 void *readbuf, const void *writebuf,
91 ULONGEST offset, LONGEST len);
92
93 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
94 ptid_t ptid);
95
96 static void init_dummy_target (void);
97
98 static struct target_ops debug_target;
99
100 static void debug_to_open (char *, int);
101
102 static void debug_to_prepare_to_store (struct regcache *);
103
104 static void debug_to_files_info (struct target_ops *);
105
106 static int debug_to_insert_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_remove_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_can_use_hw_breakpoint (int, int, int);
113
114 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
121
122 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
123
124 static int debug_to_stopped_by_watchpoint (void);
125
126 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
127
128 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
129 CORE_ADDR, CORE_ADDR, int);
130
131 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
132
133 static void debug_to_terminal_init (void);
134
135 static void debug_to_terminal_inferior (void);
136
137 static void debug_to_terminal_ours_for_output (void);
138
139 static void debug_to_terminal_save_ours (void);
140
141 static void debug_to_terminal_ours (void);
142
143 static void debug_to_terminal_info (char *, int);
144
145 static void debug_to_load (char *, int);
146
147 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_notice_signals (ptid_t);
152
153 static void debug_to_stop (ptid_t);
154
155 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
156 weird and mysterious ways. Putting the variable here lets those
157 weird and mysterious ways keep building while they are being
158 converted to the inferior inheritance structure. */
159 struct target_ops deprecated_child_ops;
160
161 /* Pointer to array of target architecture structures; the size of the
162 array; the current index into the array; the allocated size of the
163 array. */
164 struct target_ops **target_structs;
165 unsigned target_struct_size;
166 unsigned target_struct_index;
167 unsigned target_struct_allocsize;
168 #define DEFAULT_ALLOCSIZE 10
169
170 /* The initial current target, so that there is always a semi-valid
171 current target. */
172
173 static struct target_ops dummy_target;
174
175 /* Top of target stack. */
176
177 static struct target_ops *target_stack;
178
179 /* The target structure we are currently using to talk to a process
180 or file or whatever "inferior" we have. */
181
182 struct target_ops current_target;
183
184 /* Command list for target. */
185
186 static struct cmd_list_element *targetlist = NULL;
187
188 /* Nonzero if we should trust readonly sections from the
189 executable when reading memory. */
190
191 static int trust_readonly = 0;
192
193 /* Nonzero if we should show true memory content including
194 memory breakpoint inserted by gdb. */
195
196 static int show_memory_breakpoints = 0;
197
198 /* These globals control whether GDB attempts to perform these
199 operations; they are useful for targets that need to prevent
200 inadvertent disruption, such as in non-stop mode. */
201
202 int may_write_registers = 1;
203
204 int may_write_memory = 1;
205
206 int may_insert_breakpoints = 1;
207
208 int may_insert_tracepoints = 1;
209
210 int may_insert_fast_tracepoints = 1;
211
212 int may_stop = 1;
213
214 /* Nonzero if we want to see a trace of target-level operations. */
215
216 static int targetdebug = 0;
217 static void
218 show_targetdebug (struct ui_file *file, int from_tty,
219 struct cmd_list_element *c, const char *value)
220 {
221 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
222 }
223
224 static void setup_target_debug (void);
225
226 /* The option sets this. */
227 static int stack_cache_enabled_p_1 = 1;
228 /* And set_stack_cache_enabled_p updates this.
229 The reason for the separation is so that we don't flush the cache for
230 on->on transitions. */
231 static int stack_cache_enabled_p = 1;
232
233 /* This is called *after* the stack-cache has been set.
234 Flush the cache for off->on and on->off transitions.
235 There's no real need to flush the cache for on->off transitions,
236 except cleanliness. */
237
238 static void
239 set_stack_cache_enabled_p (char *args, int from_tty,
240 struct cmd_list_element *c)
241 {
242 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
243 target_dcache_invalidate ();
244
245 stack_cache_enabled_p = stack_cache_enabled_p_1;
246 }
247
248 static void
249 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
253 }
254
255 /* Cache of memory operations, to speed up remote access. */
256 static DCACHE *target_dcache;
257
258 /* Invalidate the target dcache. */
259
260 void
261 target_dcache_invalidate (void)
262 {
263 dcache_invalidate (target_dcache);
264 }
265
266 /* The user just typed 'target' without the name of a target. */
267
268 static void
269 target_command (char *arg, int from_tty)
270 {
271 fputs_filtered ("Argument required (target name). Try `help target'\n",
272 gdb_stdout);
273 }
274
275 /* Default target_has_* methods for process_stratum targets. */
276
277 int
278 default_child_has_all_memory (struct target_ops *ops)
279 {
280 /* If no inferior selected, then we can't read memory here. */
281 if (ptid_equal (inferior_ptid, null_ptid))
282 return 0;
283
284 return 1;
285 }
286
287 int
288 default_child_has_memory (struct target_ops *ops)
289 {
290 /* If no inferior selected, then we can't read memory here. */
291 if (ptid_equal (inferior_ptid, null_ptid))
292 return 0;
293
294 return 1;
295 }
296
297 int
298 default_child_has_stack (struct target_ops *ops)
299 {
300 /* If no inferior selected, there's no stack. */
301 if (ptid_equal (inferior_ptid, null_ptid))
302 return 0;
303
304 return 1;
305 }
306
307 int
308 default_child_has_registers (struct target_ops *ops)
309 {
310 /* Can't read registers from no inferior. */
311 if (ptid_equal (inferior_ptid, null_ptid))
312 return 0;
313
314 return 1;
315 }
316
317 int
318 default_child_has_execution (struct target_ops *ops)
319 {
320 /* If there's no thread selected, then we can't make it run through
321 hoops. */
322 if (ptid_equal (inferior_ptid, null_ptid))
323 return 0;
324
325 return 1;
326 }
327
328
329 int
330 target_has_all_memory_1 (void)
331 {
332 struct target_ops *t;
333
334 for (t = current_target.beneath; t != NULL; t = t->beneath)
335 if (t->to_has_all_memory (t))
336 return 1;
337
338 return 0;
339 }
340
341 int
342 target_has_memory_1 (void)
343 {
344 struct target_ops *t;
345
346 for (t = current_target.beneath; t != NULL; t = t->beneath)
347 if (t->to_has_memory (t))
348 return 1;
349
350 return 0;
351 }
352
353 int
354 target_has_stack_1 (void)
355 {
356 struct target_ops *t;
357
358 for (t = current_target.beneath; t != NULL; t = t->beneath)
359 if (t->to_has_stack (t))
360 return 1;
361
362 return 0;
363 }
364
365 int
366 target_has_registers_1 (void)
367 {
368 struct target_ops *t;
369
370 for (t = current_target.beneath; t != NULL; t = t->beneath)
371 if (t->to_has_registers (t))
372 return 1;
373
374 return 0;
375 }
376
377 int
378 target_has_execution_1 (void)
379 {
380 struct target_ops *t;
381
382 for (t = current_target.beneath; t != NULL; t = t->beneath)
383 if (t->to_has_execution (t))
384 return 1;
385
386 return 0;
387 }
388
389 /* Add a possible target architecture to the list. */
390
391 void
392 add_target (struct target_ops *t)
393 {
394 /* Provide default values for all "must have" methods. */
395 if (t->to_xfer_partial == NULL)
396 t->to_xfer_partial = default_xfer_partial;
397
398 if (t->to_has_all_memory == NULL)
399 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
400
401 if (t->to_has_memory == NULL)
402 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
403
404 if (t->to_has_stack == NULL)
405 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
406
407 if (t->to_has_registers == NULL)
408 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
409
410 if (t->to_has_execution == NULL)
411 t->to_has_execution = (int (*) (struct target_ops *)) return_zero;
412
413 if (!target_structs)
414 {
415 target_struct_allocsize = DEFAULT_ALLOCSIZE;
416 target_structs = (struct target_ops **) xmalloc
417 (target_struct_allocsize * sizeof (*target_structs));
418 }
419 if (target_struct_size >= target_struct_allocsize)
420 {
421 target_struct_allocsize *= 2;
422 target_structs = (struct target_ops **)
423 xrealloc ((char *) target_structs,
424 target_struct_allocsize * sizeof (*target_structs));
425 }
426 target_structs[target_struct_size++] = t;
427
428 if (targetlist == NULL)
429 add_prefix_cmd ("target", class_run, target_command, _("\
430 Connect to a target machine or process.\n\
431 The first argument is the type or protocol of the target machine.\n\
432 Remaining arguments are interpreted by the target protocol. For more\n\
433 information on the arguments for a particular protocol, type\n\
434 `help target ' followed by the protocol name."),
435 &targetlist, "target ", 0, &cmdlist);
436 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
437 }
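/* Illustrative sketch (not part of the original source): how a target
   backend might use add_target above.  The "demo" names here are
   hypothetical; real backends follow the same pattern from their
   _initialize_* functions.  */
#if 0
static struct target_ops demo_ops;

static void
init_demo_target (void)
{
  demo_ops.to_shortname = "demo";
  demo_ops.to_longname = "Demonstration target";
  demo_ops.to_doc = "A minimal example target.";
  demo_ops.to_open = demo_open;		/* Hypothetical open routine.  */
  demo_ops.to_stratum = process_stratum;
  demo_ops.to_magic = OPS_MAGIC;
  /* Methods left NULL get the defaults filled in by add_target and
     update_current_target.  */
  add_target (&demo_ops);
}
#endif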
438
439 /* Stub functions */
440
441 void
442 target_ignore (void)
443 {
444 }
445
446 void
447 target_kill (void)
448 {
449 struct target_ops *t;
450
451 for (t = current_target.beneath; t != NULL; t = t->beneath)
452 if (t->to_kill != NULL)
453 {
454 if (targetdebug)
455 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
456
457 t->to_kill (t);
458 return;
459 }
460
461 noprocess ();
462 }
463
464 void
465 target_load (char *arg, int from_tty)
466 {
467 target_dcache_invalidate ();
468 (*current_target.to_load) (arg, from_tty);
469 }
470
471 void
472 target_create_inferior (char *exec_file, char *args,
473 char **env, int from_tty)
474 {
475 struct target_ops *t;
476
477 for (t = current_target.beneath; t != NULL; t = t->beneath)
478 {
479 if (t->to_create_inferior != NULL)
480 {
481 t->to_create_inferior (t, exec_file, args, env, from_tty);
482 if (targetdebug)
483 fprintf_unfiltered (gdb_stdlog,
484 "target_create_inferior (%s, %s, xxx, %d)\n",
485 exec_file, args, from_tty);
486 return;
487 }
488 }
489
490 internal_error (__FILE__, __LINE__,
491 "could not find a target to create inferior");
492 }
493
494 void
495 target_terminal_inferior (void)
496 {
497 /* A background resume (``run&'') should leave GDB in control of the
498 terminal. Use target_can_async_p, not target_is_async_p, since at
499 this point the target is not async yet. However, if sync_execution
500 is not set, we know it will become async prior to resume. */
501 if (target_can_async_p () && !sync_execution)
502 return;
503
504 /* If GDB is resuming the inferior in the foreground, install
505 inferior's terminal modes. */
506 (*current_target.to_terminal_inferior) ();
507 }
508
509 static int
510 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
511 struct target_ops *t)
512 {
513 errno = EIO; /* Can't read/write this location */
514 return 0; /* No bytes handled */
515 }
516
517 static void
518 tcomplain (void)
519 {
520 error (_("You can't do that when your target is `%s'"),
521 current_target.to_shortname);
522 }
523
524 void
525 noprocess (void)
526 {
527 error (_("You can't do that without a process to debug."));
528 }
529
530 static int
531 nosymbol (char *name, CORE_ADDR *addrp)
532 {
533 return 1; /* Symbol does not exist in target env */
534 }
535
536 static void
537 default_terminal_info (char *args, int from_tty)
538 {
539 printf_unfiltered (_("No saved terminal information.\n"));
540 }
541
542 /* A default implementation for the to_get_ada_task_ptid target method.
543
544 This function builds the PTID by using both LWP and TID as part of
545 the PTID lwp and tid elements. The pid used is the pid of the
546 inferior_ptid. */
547
548 static ptid_t
549 default_get_ada_task_ptid (long lwp, long tid)
550 {
551 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
552 }
553
554 /* Go through the target stack from top to bottom, copying over zero
555 entries in current_target, then filling in still empty entries. In
556 effect, we are doing class inheritance through the pushed target
557 vectors.
558
559 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
560 is currently implemented, is that it discards any knowledge of
561 which target an inherited method originally belonged to.
562 Consequently, new target methods should instead explicitly and
563 locally search the target stack for the target that can handle the
564 request. */
565
566 static void
567 update_current_target (void)
568 {
569 struct target_ops *t;
570
571 /* First, reset current's contents. */
572 memset (&current_target, 0, sizeof (current_target));
573
574 #define INHERIT(FIELD, TARGET) \
575 if (!current_target.FIELD) \
576 current_target.FIELD = (TARGET)->FIELD
577
578 for (t = target_stack; t; t = t->beneath)
579 {
580 INHERIT (to_shortname, t);
581 INHERIT (to_longname, t);
582 INHERIT (to_doc, t);
583 /* Do not inherit to_open. */
584 /* Do not inherit to_close. */
585 /* Do not inherit to_attach. */
586 INHERIT (to_post_attach, t);
587 INHERIT (to_attach_no_wait, t);
588 /* Do not inherit to_detach. */
589 /* Do not inherit to_disconnect. */
590 /* Do not inherit to_resume. */
591 /* Do not inherit to_wait. */
592 /* Do not inherit to_fetch_registers. */
593 /* Do not inherit to_store_registers. */
594 INHERIT (to_prepare_to_store, t);
595 INHERIT (deprecated_xfer_memory, t);
596 INHERIT (to_files_info, t);
597 INHERIT (to_insert_breakpoint, t);
598 INHERIT (to_remove_breakpoint, t);
599 INHERIT (to_can_use_hw_breakpoint, t);
600 INHERIT (to_insert_hw_breakpoint, t);
601 INHERIT (to_remove_hw_breakpoint, t);
602 INHERIT (to_insert_watchpoint, t);
603 INHERIT (to_remove_watchpoint, t);
604 INHERIT (to_stopped_data_address, t);
605 INHERIT (to_have_steppable_watchpoint, t);
606 INHERIT (to_have_continuable_watchpoint, t);
607 INHERIT (to_stopped_by_watchpoint, t);
608 INHERIT (to_watchpoint_addr_within_range, t);
609 INHERIT (to_region_ok_for_hw_watchpoint, t);
610 INHERIT (to_terminal_init, t);
611 INHERIT (to_terminal_inferior, t);
612 INHERIT (to_terminal_ours_for_output, t);
613 INHERIT (to_terminal_ours, t);
614 INHERIT (to_terminal_save_ours, t);
615 INHERIT (to_terminal_info, t);
616 /* Do not inherit to_kill. */
617 INHERIT (to_load, t);
618 INHERIT (to_lookup_symbol, t);
619 /* Do not inherit to_create_inferior. */
620 INHERIT (to_post_startup_inferior, t);
621 INHERIT (to_acknowledge_created_inferior, t);
622 INHERIT (to_insert_fork_catchpoint, t);
623 INHERIT (to_remove_fork_catchpoint, t);
624 INHERIT (to_insert_vfork_catchpoint, t);
625 INHERIT (to_remove_vfork_catchpoint, t);
626 /* Do not inherit to_follow_fork. */
627 INHERIT (to_insert_exec_catchpoint, t);
628 INHERIT (to_remove_exec_catchpoint, t);
629 INHERIT (to_set_syscall_catchpoint, t);
630 INHERIT (to_has_exited, t);
631 /* Do not inherit to_mourn_inferior. */
632 INHERIT (to_can_run, t);
633 INHERIT (to_notice_signals, t);
634 /* Do not inherit to_thread_alive. */
635 /* Do not inherit to_find_new_threads. */
636 /* Do not inherit to_pid_to_str. */
637 INHERIT (to_extra_thread_info, t);
638 INHERIT (to_stop, t);
639 /* Do not inherit to_xfer_partial. */
640 INHERIT (to_rcmd, t);
641 INHERIT (to_pid_to_exec_file, t);
642 INHERIT (to_log_command, t);
643 INHERIT (to_stratum, t);
644 /* Do not inherit to_has_all_memory */
645 /* Do not inherit to_has_memory */
646 /* Do not inherit to_has_stack */
647 /* Do not inherit to_has_registers */
648 /* Do not inherit to_has_execution */
649 INHERIT (to_has_thread_control, t);
650 INHERIT (to_can_async_p, t);
651 INHERIT (to_is_async_p, t);
652 INHERIT (to_async, t);
653 INHERIT (to_async_mask, t);
654 INHERIT (to_find_memory_regions, t);
655 INHERIT (to_make_corefile_notes, t);
656 INHERIT (to_get_bookmark, t);
657 INHERIT (to_goto_bookmark, t);
658 /* Do not inherit to_get_thread_local_address. */
659 INHERIT (to_can_execute_reverse, t);
660 INHERIT (to_thread_architecture, t);
661 /* Do not inherit to_read_description. */
662 INHERIT (to_get_ada_task_ptid, t);
663 /* Do not inherit to_search_memory. */
664 INHERIT (to_supports_multi_process, t);
665 INHERIT (to_trace_init, t);
666 INHERIT (to_download_tracepoint, t);
667 INHERIT (to_download_trace_state_variable, t);
668 INHERIT (to_trace_set_readonly_regions, t);
669 INHERIT (to_trace_start, t);
670 INHERIT (to_get_trace_status, t);
671 INHERIT (to_trace_stop, t);
672 INHERIT (to_trace_find, t);
673 INHERIT (to_get_trace_state_variable_value, t);
674 INHERIT (to_save_trace_data, t);
675 INHERIT (to_upload_tracepoints, t);
676 INHERIT (to_upload_trace_state_variables, t);
677 INHERIT (to_get_raw_trace_data, t);
678 INHERIT (to_set_disconnected_tracing, t);
679 INHERIT (to_set_circular_trace_buffer, t);
680 INHERIT (to_get_tib_address, t);
681 INHERIT (to_set_permissions, t);
682 INHERIT (to_magic, t);
683 /* Do not inherit to_memory_map. */
684 /* Do not inherit to_flash_erase. */
685 /* Do not inherit to_flash_done. */
686 }
687 #undef INHERIT
688
689 /* Clean up a target struct so it no longer has any zero pointers in
690 it. Some entries are defaulted to a method that prints an error,
691 others are hard-wired to a standard recursive default. */
692
693 #define de_fault(field, value) \
694 if (!current_target.field) \
695 current_target.field = value
696
697 de_fault (to_open,
698 (void (*) (char *, int))
699 tcomplain);
700 de_fault (to_close,
701 (void (*) (int))
702 target_ignore);
703 de_fault (to_post_attach,
704 (void (*) (int))
705 target_ignore);
706 de_fault (to_prepare_to_store,
707 (void (*) (struct regcache *))
708 noprocess);
709 de_fault (deprecated_xfer_memory,
710 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
711 nomemory);
712 de_fault (to_files_info,
713 (void (*) (struct target_ops *))
714 target_ignore);
715 de_fault (to_insert_breakpoint,
716 memory_insert_breakpoint);
717 de_fault (to_remove_breakpoint,
718 memory_remove_breakpoint);
719 de_fault (to_can_use_hw_breakpoint,
720 (int (*) (int, int, int))
721 return_zero);
722 de_fault (to_insert_hw_breakpoint,
723 (int (*) (struct gdbarch *, struct bp_target_info *))
724 return_minus_one);
725 de_fault (to_remove_hw_breakpoint,
726 (int (*) (struct gdbarch *, struct bp_target_info *))
727 return_minus_one);
728 de_fault (to_insert_watchpoint,
729 (int (*) (CORE_ADDR, int, int))
730 return_minus_one);
731 de_fault (to_remove_watchpoint,
732 (int (*) (CORE_ADDR, int, int))
733 return_minus_one);
734 de_fault (to_stopped_by_watchpoint,
735 (int (*) (void))
736 return_zero);
737 de_fault (to_stopped_data_address,
738 (int (*) (struct target_ops *, CORE_ADDR *))
739 return_zero);
740 de_fault (to_watchpoint_addr_within_range,
741 default_watchpoint_addr_within_range);
742 de_fault (to_region_ok_for_hw_watchpoint,
743 default_region_ok_for_hw_watchpoint);
744 de_fault (to_terminal_init,
745 (void (*) (void))
746 target_ignore);
747 de_fault (to_terminal_inferior,
748 (void (*) (void))
749 target_ignore);
750 de_fault (to_terminal_ours_for_output,
751 (void (*) (void))
752 target_ignore);
753 de_fault (to_terminal_ours,
754 (void (*) (void))
755 target_ignore);
756 de_fault (to_terminal_save_ours,
757 (void (*) (void))
758 target_ignore);
759 de_fault (to_terminal_info,
760 default_terminal_info);
761 de_fault (to_load,
762 (void (*) (char *, int))
763 tcomplain);
764 de_fault (to_lookup_symbol,
765 (int (*) (char *, CORE_ADDR *))
766 nosymbol);
767 de_fault (to_post_startup_inferior,
768 (void (*) (ptid_t))
769 target_ignore);
770 de_fault (to_acknowledge_created_inferior,
771 (void (*) (int))
772 target_ignore);
773 de_fault (to_insert_fork_catchpoint,
774 (void (*) (int))
775 tcomplain);
776 de_fault (to_remove_fork_catchpoint,
777 (int (*) (int))
778 tcomplain);
779 de_fault (to_insert_vfork_catchpoint,
780 (void (*) (int))
781 tcomplain);
782 de_fault (to_remove_vfork_catchpoint,
783 (int (*) (int))
784 tcomplain);
785 de_fault (to_insert_exec_catchpoint,
786 (void (*) (int))
787 tcomplain);
788 de_fault (to_remove_exec_catchpoint,
789 (int (*) (int))
790 tcomplain);
791 de_fault (to_set_syscall_catchpoint,
792 (int (*) (int, int, int, int, int *))
793 tcomplain);
794 de_fault (to_has_exited,
795 (int (*) (int, int, int *))
796 return_zero);
797 de_fault (to_can_run,
798 return_zero);
799 de_fault (to_notice_signals,
800 (void (*) (ptid_t))
801 target_ignore);
802 de_fault (to_extra_thread_info,
803 (char *(*) (struct thread_info *))
804 return_zero);
805 de_fault (to_stop,
806 (void (*) (ptid_t))
807 target_ignore);
808 current_target.to_xfer_partial = current_xfer_partial;
809 de_fault (to_rcmd,
810 (void (*) (char *, struct ui_file *))
811 tcomplain);
812 de_fault (to_pid_to_exec_file,
813 (char *(*) (int))
814 return_zero);
815 de_fault (to_async,
816 (void (*) (void (*) (enum inferior_event_type, void*), void*))
817 tcomplain);
818 de_fault (to_async_mask,
819 (int (*) (int))
820 return_one);
821 de_fault (to_thread_architecture,
822 default_thread_architecture);
823 current_target.to_read_description = NULL;
824 de_fault (to_get_ada_task_ptid,
825 (ptid_t (*) (long, long))
826 default_get_ada_task_ptid);
827 de_fault (to_supports_multi_process,
828 (int (*) (void))
829 return_zero);
830 de_fault (to_trace_init,
831 (void (*) (void))
832 tcomplain);
833 de_fault (to_download_tracepoint,
834 (void (*) (struct breakpoint *))
835 tcomplain);
836 de_fault (to_download_trace_state_variable,
837 (void (*) (struct trace_state_variable *))
838 tcomplain);
839 de_fault (to_trace_set_readonly_regions,
840 (void (*) (void))
841 tcomplain);
842 de_fault (to_trace_start,
843 (void (*) (void))
844 tcomplain);
845 de_fault (to_get_trace_status,
846 (int (*) (struct trace_status *))
847 return_minus_one);
848 de_fault (to_trace_stop,
849 (void (*) (void))
850 tcomplain);
851 de_fault (to_trace_find,
852 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
853 return_minus_one);
854 de_fault (to_get_trace_state_variable_value,
855 (int (*) (int, LONGEST *))
856 return_zero);
857 de_fault (to_save_trace_data,
858 (int (*) (const char *))
859 tcomplain);
860 de_fault (to_upload_tracepoints,
861 (int (*) (struct uploaded_tp **))
862 return_zero);
863 de_fault (to_upload_trace_state_variables,
864 (int (*) (struct uploaded_tsv **))
865 return_zero);
866 de_fault (to_get_raw_trace_data,
867 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
868 tcomplain);
869 de_fault (to_set_disconnected_tracing,
870 (void (*) (int))
871 target_ignore);
872 de_fault (to_set_circular_trace_buffer,
873 (void (*) (int))
874 target_ignore);
875 de_fault (to_get_tib_address,
876 (int (*) (ptid_t, CORE_ADDR *))
877 tcomplain);
878 de_fault (to_set_permissions,
879 (void (*) (void))
880 target_ignore);
881 #undef de_fault
882
883 /* Finally, position the target-stack beneath the squashed
884 "current_target". That way code looking for a non-inherited
885 target method can quickly and simply find it. */
886 current_target.beneath = target_stack;
887
888 if (targetdebug)
889 setup_target_debug ();
890 }
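/* Illustrative note (not part of the original source): the INHERIT and
   de_fault macros used above expand to simple "fill in if still NULL"
   tests, as sketched here for the to_files_info method.  */
#if 0
/* INHERIT (to_files_info, t) expands to:  */
if (!current_target.to_files_info)
  current_target.to_files_info = t->to_files_info;

/* de_fault (to_files_info, (void (*) (struct target_ops *)) target_ignore)
   expands to:  */
if (!current_target.to_files_info)
  current_target.to_files_info
    = (void (*) (struct target_ops *)) target_ignore;
#endif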
891
892 /* Push a new target type into the stack of the existing target accessors,
893 possibly superseding some of the existing accessors.
894
895 Rather than allow an empty stack, we always have the dummy target at
896 the bottom stratum, so we can call the function vectors without
897 checking them. */
898
899 void
900 push_target (struct target_ops *t)
901 {
902 struct target_ops **cur;
903
904 /* Check magic number. If wrong, it probably means someone changed
905 the struct definition, but not all the places that initialize one. */
906 if (t->to_magic != OPS_MAGIC)
907 {
908 fprintf_unfiltered (gdb_stderr,
909 "Magic number of %s target struct wrong\n",
910 t->to_shortname);
911 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
912 }
913
914 /* Find the proper stratum to install this target in. */
915 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
916 {
917 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
918 break;
919 }
920
921 /* If there are already targets at this stratum, remove them. */
922 /* FIXME: cagney/2003-10-15: I think this should be popping all
923 targets to CUR, and not just those at this stratum level. */
924 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
925 {
926 /* There's already something at this stratum level. Close it,
927 and un-hook it from the stack. */
928 struct target_ops *tmp = (*cur);
929
930 (*cur) = (*cur)->beneath;
931 tmp->beneath = NULL;
932 target_close (tmp, 0);
933 }
934
935 /* We have removed all targets in our stratum, now add the new one. */
936 t->beneath = (*cur);
937 (*cur) = t;
938
939 update_current_target ();
940 }
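/* Illustrative sketch (not part of the original source): the effect of
   pushing targets at different strata.  Strata are ordered, so a
   process_stratum target always ends up above a file_stratum target
   regardless of push order, and pushing a second target at an occupied
   stratum closes and replaces the old one.  The target names below
   stand in for whatever backends are in use.  */
#if 0
  push_target (&exec_ops);		/* file_stratum: the executable.  */
  push_target (&some_process_ops);	/* process_stratum: live process.  */
  /* The stack is now (top to bottom):
	some_process_ops  (process_stratum)
	exec_ops          (file_stratum)
	dummy_target      (dummy_stratum)  */
#endif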
941
942 /* Remove a target_ops vector from the stack, wherever it may be.
943 Return how many times it was removed (0 or 1). */
944
945 int
946 unpush_target (struct target_ops *t)
947 {
948 struct target_ops **cur;
949 struct target_ops *tmp;
950
951 if (t->to_stratum == dummy_stratum)
952 internal_error (__FILE__, __LINE__,
953 "Attempt to unpush the dummy target");
954
955 /* Look for the specified target. Note that we assume that a target
956 can only occur once in the target stack. */
957
958 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
959 {
960 if ((*cur) == t)
961 break;
962 }
963
964 if ((*cur) == NULL)
965 return 0; /* Didn't find target_ops, quit now */
966
967 /* NOTE: cagney/2003-12-06: In '94 the close call was made
968 unconditional by moving it to before the above check that the
969 target was in the target stack (something about "Change the way
970 pushing and popping of targets work to support target overlays
971 and inheritance"). This doesn't make much sense - only open
972 targets should be closed. */
973 target_close (t, 0);
974
975 /* Unchain the target */
976 tmp = (*cur);
977 (*cur) = (*cur)->beneath;
978 tmp->beneath = NULL;
979
980 update_current_target ();
981
982 return 1;
983 }
984
985 void
986 pop_target (void)
987 {
988 target_close (target_stack, 0); /* Let it clean up */
989 if (unpush_target (target_stack) == 1)
990 return;
991
992 fprintf_unfiltered (gdb_stderr,
993 "pop_target couldn't find target %s\n",
994 current_target.to_shortname);
995 internal_error (__FILE__, __LINE__,
996 _("failed internal consistency check"));
997 }
998
999 void
1000 pop_all_targets_above (enum strata above_stratum, int quitting)
1001 {
1002 while ((int) (current_target.to_stratum) > (int) above_stratum)
1003 {
1004 target_close (target_stack, quitting);
1005 if (!unpush_target (target_stack))
1006 {
1007 fprintf_unfiltered (gdb_stderr,
1008 "pop_all_targets couldn't find target %s\n",
1009 target_stack->to_shortname);
1010 internal_error (__FILE__, __LINE__,
1011 _("failed internal consistency check"));
1012 break;
1013 }
1014 }
1015 }
1016
1017 void
1018 pop_all_targets (int quitting)
1019 {
1020 pop_all_targets_above (dummy_stratum, quitting);
1021 }
1022
1023 /* Using the objfile specified in OBJFILE, find the address for the
1024 current thread's thread-local storage with offset OFFSET. */
1025 CORE_ADDR
1026 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1027 {
1028 volatile CORE_ADDR addr = 0;
1029 struct target_ops *target;
1030
1031 for (target = current_target.beneath;
1032 target != NULL;
1033 target = target->beneath)
1034 {
1035 if (target->to_get_thread_local_address != NULL)
1036 break;
1037 }
1038
1039 if (target != NULL
1040 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
1041 {
1042 ptid_t ptid = inferior_ptid;
1043 volatile struct gdb_exception ex;
1044
1045 TRY_CATCH (ex, RETURN_MASK_ALL)
1046 {
1047 CORE_ADDR lm_addr;
1048
1049 /* Fetch the load module address for this objfile. */
1050 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1051 objfile);
1052 /* If it's 0, throw the appropriate exception. */
1053 if (lm_addr == 0)
1054 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1055 _("TLS load module not found"));
1056
1057 addr = target->to_get_thread_local_address (target, ptid, lm_addr, offset);
1058 }
1059 /* If an error occurred, print TLS related messages here. Otherwise,
1060 throw the error to some higher catcher. */
1061 if (ex.reason < 0)
1062 {
1063 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1064
1065 switch (ex.error)
1066 {
1067 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1068 error (_("Cannot find thread-local variables in this thread library."));
1069 break;
1070 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1071 if (objfile_is_library)
1072 error (_("Cannot find shared library `%s' in dynamic"
1073 " linker's load module list"), objfile->name);
1074 else
1075 error (_("Cannot find executable file `%s' in dynamic"
1076 " linker's load module list"), objfile->name);
1077 break;
1078 case TLS_NOT_ALLOCATED_YET_ERROR:
1079 if (objfile_is_library)
1080 error (_("The inferior has not yet allocated storage for"
1081 " thread-local variables in\n"
1082 "the shared library `%s'\n"
1083 "for %s"),
1084 objfile->name, target_pid_to_str (ptid));
1085 else
1086 error (_("The inferior has not yet allocated storage for"
1087 " thread-local variables in\n"
1088 "the executable `%s'\n"
1089 "for %s"),
1090 objfile->name, target_pid_to_str (ptid));
1091 break;
1092 case TLS_GENERIC_ERROR:
1093 if (objfile_is_library)
1094 error (_("Cannot find thread-local storage for %s, "
1095 "shared library %s:\n%s"),
1096 target_pid_to_str (ptid),
1097 objfile->name, ex.message);
1098 else
1099 error (_("Cannot find thread-local storage for %s, "
1100 "executable file %s:\n%s"),
1101 target_pid_to_str (ptid),
1102 objfile->name, ex.message);
1103 break;
1104 default:
1105 throw_exception (ex);
1106 break;
1107 }
1108 }
1109 }
1110 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1111 TLS is an ABI-specific thing. But we don't do that yet. */
1112 else
1113 error (_("Cannot find thread-local variables on this target"));
1114
1115 return addr;
1116 }
1117
1118 #undef MIN
1119 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1120
1121 /* target_read_string -- read a null terminated string, up to LEN bytes,
1122 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1123 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1124 is responsible for freeing it. Return the number of bytes successfully
1125 read. */
1126
1127 int
1128 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1129 {
1130 int tlen, origlen, offset, i;
1131 gdb_byte buf[4];
1132 int errcode = 0;
1133 char *buffer;
1134 int buffer_allocated;
1135 char *bufptr;
1136 unsigned int nbytes_read = 0;
1137
1138 gdb_assert (string);
1139
1140 /* Small for testing. */
1141 buffer_allocated = 4;
1142 buffer = xmalloc (buffer_allocated);
1143 bufptr = buffer;
1144
1145 origlen = len;
1146
1147 while (len > 0)
1148 {
1149 tlen = MIN (len, 4 - (memaddr & 3));
1150 offset = memaddr & 3;
1151
1152 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1153 if (errcode != 0)
1154 {
1155 /* The transfer request might have crossed the boundary to an
1156 unallocated region of memory. Retry the transfer, requesting
1157 a single byte. */
1158 tlen = 1;
1159 offset = 0;
1160 errcode = target_read_memory (memaddr, buf, 1);
1161 if (errcode != 0)
1162 goto done;
1163 }
1164
1165 if (bufptr - buffer + tlen > buffer_allocated)
1166 {
1167 unsigned int bytes;
1168
1169 bytes = bufptr - buffer;
1170 buffer_allocated *= 2;
1171 buffer = xrealloc (buffer, buffer_allocated);
1172 bufptr = buffer + bytes;
1173 }
1174
1175 for (i = 0; i < tlen; i++)
1176 {
1177 *bufptr++ = buf[i + offset];
1178 if (buf[i + offset] == '\000')
1179 {
1180 nbytes_read += i + 1;
1181 goto done;
1182 }
1183 }
1184
1185 memaddr += tlen;
1186 len -= tlen;
1187 nbytes_read += tlen;
1188 }
1189 done:
1190 *string = buffer;
1191 if (errnop != NULL)
1192 *errnop = errcode;
1193 return nbytes_read;
1194 }
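/* Illustrative sketch (not part of the original source): a typical call
   to target_read_string.  STR_ADDR stands for some address known to
   hold a NUL-terminated string in the inferior.  */
#if 0
  char *str;
  int err;
  int nread = target_read_string (str_addr, &str, 200, &err);

  if (err == 0)
    printf_unfiltered ("read %d bytes: %s\n", nread, str);
  /* The buffer is malloc'd by target_read_string; the caller frees it.  */
  xfree (str);
#endif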
1195
1196 struct target_section_table *
1197 target_get_section_table (struct target_ops *target)
1198 {
1199 struct target_ops *t;
1200
1201 if (targetdebug)
1202 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1203
1204 for (t = target; t != NULL; t = t->beneath)
1205 if (t->to_get_section_table != NULL)
1206 return (*t->to_get_section_table) (t);
1207
1208 return NULL;
1209 }
1210
1211 /* Find a section containing ADDR. */
1212
1213 struct target_section *
1214 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1215 {
1216 struct target_section_table *table = target_get_section_table (target);
1217 struct target_section *secp;
1218
1219 if (table == NULL)
1220 return NULL;
1221
1222 for (secp = table->sections; secp < table->sections_end; secp++)
1223 {
1224 if (addr >= secp->addr && addr < secp->endaddr)
1225 return secp;
1226 }
1227 return NULL;
1228 }
1229
1230 /* Perform a partial memory transfer.
1231 For docs see target.h, to_xfer_partial. */
1232
1233 static LONGEST
1234 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1235 void *readbuf, const void *writebuf, ULONGEST memaddr,
1236 LONGEST len)
1237 {
1238 LONGEST res;
1239 int reg_len;
1240 struct mem_region *region;
1241 struct inferior *inf;
1242
1243 /* Zero length requests are ok and require no work. */
1244 if (len == 0)
1245 return 0;
1246
1247 /* For accesses to unmapped overlay sections, read directly from
1248 files. Must do this first, as MEMADDR may need adjustment. */
1249 if (readbuf != NULL && overlay_debugging)
1250 {
1251 struct obj_section *section = find_pc_overlay (memaddr);
1252
1253 if (pc_in_unmapped_range (memaddr, section))
1254 {
1255 struct target_section_table *table
1256 = target_get_section_table (ops);
1257 const char *section_name = section->the_bfd_section->name;
1258
1259 memaddr = overlay_mapped_address (memaddr, section);
1260 return section_table_xfer_memory_partial (readbuf, writebuf,
1261 memaddr, len,
1262 table->sections,
1263 table->sections_end,
1264 section_name);
1265 }
1266 }
1267
1268 /* Try the executable files, if "trust-readonly-sections" is set. */
1269 if (readbuf != NULL && trust_readonly)
1270 {
1271 struct target_section *secp;
1272 struct target_section_table *table;
1273
1274 secp = target_section_by_addr (ops, memaddr);
1275 if (secp != NULL
1276 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1277 & SEC_READONLY))
1278 {
1279 table = target_get_section_table (ops);
1280 return section_table_xfer_memory_partial (readbuf, writebuf,
1281 memaddr, len,
1282 table->sections,
1283 table->sections_end,
1284 NULL);
1285 }
1286 }
1287
1288 /* Try GDB's internal data cache. */
1289 region = lookup_mem_region (memaddr);
1290 /* region->hi == 0 means there's no upper bound. */
1291 if (memaddr + len < region->hi || region->hi == 0)
1292 reg_len = len;
1293 else
1294 reg_len = region->hi - memaddr;
1295
1296 switch (region->attrib.mode)
1297 {
1298 case MEM_RO:
1299 if (writebuf != NULL)
1300 return -1;
1301 break;
1302
1303 case MEM_WO:
1304 if (readbuf != NULL)
1305 return -1;
1306 break;
1307
1308 case MEM_FLASH:
1309 /* We only support writing to flash during "load" for now. */
1310 if (writebuf != NULL)
1311 error (_("Writing to flash memory forbidden in this context"));
1312 break;
1313
1314 case MEM_NONE:
1315 return -1;
1316 }
1317
1318 if (!ptid_equal (inferior_ptid, null_ptid))
1319 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1320 else
1321 inf = NULL;
1322
1323 if (inf != NULL
1324 /* The dcache reads whole cache lines; that doesn't play well
1325 with reading from a trace buffer, because reading outside of
1326 the collected memory range fails. */
1327 && get_traceframe_number () == -1
1328 && (region->attrib.cache
1329 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1330 {
1331 if (readbuf != NULL)
1332 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1333 reg_len, 0);
1334 else
1335 /* FIXME drow/2006-08-09: If we're going to preserve const
1336 correctness dcache_xfer_memory should take readbuf and
1337 writebuf. */
1338 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1339 (void *) writebuf,
1340 reg_len, 1);
1341 if (res <= 0)
1342 return -1;
1343 else
1344 {
1345 if (readbuf && !show_memory_breakpoints)
1346 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1347 return res;
1348 }
1349 }
1350
1351 /* If none of those methods found the memory we wanted, fall back
1352 to a target partial transfer. Normally a single call to
1353 to_xfer_partial is enough; if it doesn't recognize an object
1354 it will call the to_xfer_partial of the next target down.
1355 But for memory this won't do. Memory is the only target
1356 object which can be read from more than one valid target.
1357 A core file, for instance, could have some of memory but
1358 delegate other bits to the target below it. So, we must
1359 manually try all targets. */
1360
1361 do
1362 {
1363 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1364 readbuf, writebuf, memaddr, reg_len);
1365 if (res > 0)
1366 break;
1367
1368 /* We want to continue past core files to executables, but not
1369 past a running target's memory. */
1370 if (ops->to_has_all_memory (ops))
1371 break;
1372
1373 ops = ops->beneath;
1374 }
1375 while (ops != NULL);
1376
1377 if (readbuf && !show_memory_breakpoints)
1378 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1379
1380 /* If we are writing to the stack, make sure the cache gets updated
1381 no matter what. Even if this write is not tagged as a stack write,
1382 we still need to update the cache. */
1383
1384 if (res > 0
1385 && inf != NULL
1386 && writebuf != NULL
1387 && !region->attrib.cache
1388 && stack_cache_enabled_p
1389 && object != TARGET_OBJECT_STACK_MEMORY)
1390 {
1391 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1392 }
1393
1394 /* If we still haven't got anything, return the last error. We
1395 give up. */
1396 return res;
1397 }
1398
1399 static void
1400 restore_show_memory_breakpoints (void *arg)
1401 {
1402 show_memory_breakpoints = (uintptr_t) arg;
1403 }
1404
1405 struct cleanup *
1406 make_show_memory_breakpoints_cleanup (int show)
1407 {
1408 int current = show_memory_breakpoints;
1409
1410 show_memory_breakpoints = show;
1411 return make_cleanup (restore_show_memory_breakpoints,
1412 (void *) (uintptr_t) current);
1413 }
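/* Illustrative sketch (not part of the original source): reading raw
   memory contents, including any breakpoint instructions GDB has
   inserted, using the cleanup above to restore the previous setting
   even if an error is thrown.  ADDR is hypothetical.  */
#if 0
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  gdb_byte buf[16];

  target_read_memory (addr, buf, sizeof buf);
  do_cleanups (old_chain);
#endif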
1414
1415 /* For docs see target.h, to_xfer_partial. */
1416
1417 static LONGEST
1418 target_xfer_partial (struct target_ops *ops,
1419 enum target_object object, const char *annex,
1420 void *readbuf, const void *writebuf,
1421 ULONGEST offset, LONGEST len)
1422 {
1423 LONGEST retval;
1424
1425 gdb_assert (ops->to_xfer_partial != NULL);
1426
1427 if (writebuf && !may_write_memory)
1428 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1429 core_addr_to_string_nz (offset), plongest (len));
1430
1431 /* If this is a memory transfer, let the memory-specific code
1432 have a look at it instead. Memory transfers are more
1433 complicated. */
1434 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1435 retval = memory_xfer_partial (ops, object, readbuf,
1436 writebuf, offset, len);
1437 else
1438 {
1439 enum target_object raw_object = object;
1440
1441 /* If this is a raw memory transfer, request the normal
1442 memory object from other layers. */
1443 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1444 raw_object = TARGET_OBJECT_MEMORY;
1445
1446 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1447 writebuf, offset, len);
1448 }
1449
1450 if (targetdebug)
1451 {
1452 const unsigned char *myaddr = NULL;
1453
1454 fprintf_unfiltered (gdb_stdlog,
1455 "%s:target_xfer_partial (%d, %s, %s, %s, %s, %s) = %s",
1456 ops->to_shortname,
1457 (int) object,
1458 (annex ? annex : "(null)"),
1459 host_address_to_string (readbuf),
1460 host_address_to_string (writebuf),
1461 core_addr_to_string_nz (offset),
1462 plongest (len), plongest (retval));
1463
1464 if (readbuf)
1465 myaddr = readbuf;
1466 if (writebuf)
1467 myaddr = writebuf;
1468 if (retval > 0 && myaddr != NULL)
1469 {
1470 int i;
1471
1472 fputs_unfiltered (", bytes =", gdb_stdlog);
1473 for (i = 0; i < retval; i++)
1474 {
1475 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1476 {
1477 if (targetdebug < 2 && i > 0)
1478 {
1479 fprintf_unfiltered (gdb_stdlog, " ...");
1480 break;
1481 }
1482 fprintf_unfiltered (gdb_stdlog, "\n");
1483 }
1484
1485 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1486 }
1487 }
1488
1489 fputc_unfiltered ('\n', gdb_stdlog);
1490 }
1491 return retval;
1492 }
1493
1494 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1495 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1496 if any error occurs.
1497
1498 If an error occurs, no guarantee is made about the contents of the data at
1499 MYADDR. In particular, the caller should not depend upon partial reads
1500 filling the buffer with good data. There is no way for the caller to know
1501 how much good data might have been transferred anyway. Callers that can
1502 deal with partial reads should call target_read (which will retry until
1503 it makes no progress, and then return how much was transferred). */
1504
1505 int
1506 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1507 {
1508 /* Dispatch to the topmost target, not the flattened current_target.
1509 Memory accesses check target->to_has_(all_)memory, and the
1510 flattened target doesn't inherit those. */
1511 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1512 myaddr, memaddr, len) == len)
1513 return 0;
1514 else
1515 return EIO;
1516 }
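/* Illustrative sketch (not part of the original source): checking the
   errno-style return value of target_read_memory.  MEMADDR is
   hypothetical.  */
#if 0
  gdb_byte word[4];

  if (target_read_memory (memaddr, word, sizeof word) != 0)
    error (_("Cannot read memory at %s"),
	   paddress (target_gdbarch, memaddr));
#endif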
1517
1518 /* Like target_read_memory, but specify explicitly that this is a read from
1519 the target's stack. This may trigger different cache behavior. */
1520
1521 int
1522 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1523 {
1524 /* Dispatch to the topmost target, not the flattened current_target.
1525 Memory accesses check target->to_has_(all_)memory, and the
1526 flattened target doesn't inherit those. */
1527
1528 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1529 myaddr, memaddr, len) == len)
1530 return 0;
1531 else
1532 return EIO;
1533 }
1534
1535 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1536 Returns either 0 for success or an errno value if any error occurs.
1537 If an error occurs, no guarantee is made about how much data got written.
1538 Callers that can deal with partial writes should call target_write. */
1539
1540 int
1541 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1542 {
1543 /* Dispatch to the topmost target, not the flattened current_target.
1544 Memory accesses check target->to_has_(all_)memory, and the
1545 flattened target doesn't inherit those. */
1546 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1547 myaddr, memaddr, len) == len)
1548 return 0;
1549 else
1550 return EIO;
1551 }
1552
1553 /* Fetch the target's memory map. */
1554
1555 VEC(mem_region_s) *
1556 target_memory_map (void)
1557 {
1558 VEC(mem_region_s) *result;
1559 struct mem_region *last_one, *this_one;
1560 int ix;
1561 struct target_ops *t;
1562
1563 if (targetdebug)
1564 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1565
1566 for (t = current_target.beneath; t != NULL; t = t->beneath)
1567 if (t->to_memory_map != NULL)
1568 break;
1569
1570 if (t == NULL)
1571 return NULL;
1572
1573 result = t->to_memory_map (t);
1574 if (result == NULL)
1575 return NULL;
1576
1577 qsort (VEC_address (mem_region_s, result),
1578 VEC_length (mem_region_s, result),
1579 sizeof (struct mem_region), mem_region_cmp);
1580
1581 /* Check that regions do not overlap. Simultaneously assign
1582 a numbering for the "mem" commands to use to refer to
1583 each region. */
1584 last_one = NULL;
1585 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1586 {
1587 this_one->number = ix;
1588
1589 if (last_one && last_one->hi > this_one->lo)
1590 {
1591 warning (_("Overlapping regions in memory map: ignoring"));
1592 VEC_free (mem_region_s, result);
1593 return NULL;
1594 }
1595 last_one = this_one;
1596 }
1597
1598 return result;
1599 }
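/* Illustrative sketch (not part of the original source): consuming the
   vector returned by target_memory_map, mirroring the VEC_iterate /
   VEC_free usage above.  */
#if 0
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_unfiltered ("region %d: %s..%s\n", r->number,
		       hex_string (r->lo), hex_string (r->hi));
  VEC_free (mem_region_s, map);
#endif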
1600
1601 void
1602 target_flash_erase (ULONGEST address, LONGEST length)
1603 {
1604 struct target_ops *t;
1605
1606 for (t = current_target.beneath; t != NULL; t = t->beneath)
1607 if (t->to_flash_erase != NULL)
1608 {
1609 if (targetdebug)
1610 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1611 hex_string (address), phex (length, 0));
1612 t->to_flash_erase (t, address, length);
1613 return;
1614 }
1615
1616 tcomplain ();
1617 }
1618
1619 void
1620 target_flash_done (void)
1621 {
1622 struct target_ops *t;
1623
1624 for (t = current_target.beneath; t != NULL; t = t->beneath)
1625 if (t->to_flash_done != NULL)
1626 {
1627 if (targetdebug)
1628 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1629 t->to_flash_done (t);
1630 return;
1631 }
1632
1633 tcomplain ();
1634 }
1635
1636 static void
1637 show_trust_readonly (struct ui_file *file, int from_tty,
1638 struct cmd_list_element *c, const char *value)
1639 {
1640 fprintf_filtered (file, _("\
1641 Mode for reading from readonly sections is %s.\n"),
1642 value);
1643 }
1644
1645 /* More generic transfers. */
1646
1647 static LONGEST
1648 default_xfer_partial (struct target_ops *ops, enum target_object object,
1649 const char *annex, gdb_byte *readbuf,
1650 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1651 {
1652 if (object == TARGET_OBJECT_MEMORY
1653 && ops->deprecated_xfer_memory != NULL)
1654 /* If available, fall back to the target's
1655 "deprecated_xfer_memory" method. */
1656 {
1657 int xfered = -1;
1658
1659 errno = 0;
1660 if (writebuf != NULL)
1661 {
1662 void *buffer = xmalloc (len);
1663 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1664
1665 memcpy (buffer, writebuf, len);
1666 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1667 1/*write*/, NULL, ops);
1668 do_cleanups (cleanup);
1669 }
1670 if (readbuf != NULL)
1671 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1672 0/*read*/, NULL, ops);
1673 if (xfered > 0)
1674 return xfered;
1675 else if (xfered == 0 && errno == 0)
1676 /* "deprecated_xfer_memory" uses 0, cross checked against
1677 ERRNO as one indication of an error. */
1678 return 0;
1679 else
1680 return -1;
1681 }
1682 else if (ops->beneath != NULL)
1683 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1684 readbuf, writebuf, offset, len);
1685 else
1686 return -1;
1687 }
1688
1689 /* The xfer_partial handler for the topmost target. Unlike the default,
1690 it does not need to handle memory specially; it just passes all
1691 requests down the stack. */
1692
1693 static LONGEST
1694 current_xfer_partial (struct target_ops *ops, enum target_object object,
1695 const char *annex, gdb_byte *readbuf,
1696 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1697 {
1698 if (ops->beneath != NULL)
1699 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1700 readbuf, writebuf, offset, len);
1701 else
1702 return -1;
1703 }
1704
1705 /* Target vector read/write partial wrapper functions. */
1706
1707 static LONGEST
1708 target_read_partial (struct target_ops *ops,
1709 enum target_object object,
1710 const char *annex, gdb_byte *buf,
1711 ULONGEST offset, LONGEST len)
1712 {
1713 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1714 }
1715
1716 static LONGEST
1717 target_write_partial (struct target_ops *ops,
1718 enum target_object object,
1719 const char *annex, const gdb_byte *buf,
1720 ULONGEST offset, LONGEST len)
1721 {
1722 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1723 }
1724
1725 /* Wrappers to perform the full transfer. */
1726
1727 /* For docs on target_read see target.h. */
1728
1729 LONGEST
1730 target_read (struct target_ops *ops,
1731 enum target_object object,
1732 const char *annex, gdb_byte *buf,
1733 ULONGEST offset, LONGEST len)
1734 {
1735 LONGEST xfered = 0;
1736
1737 while (xfered < len)
1738 {
1739 LONGEST xfer = target_read_partial (ops, object, annex,
1740 (gdb_byte *) buf + xfered,
1741 offset + xfered, len - xfered);
1742
1743 /* Call an observer, notifying them of the xfer progress? */
1744 if (xfer == 0)
1745 return xfered;
1746 if (xfer < 0)
1747 return -1;
1748 xfered += xfer;
1749 QUIT;
1750 }
1751 return len;
1752 }
1753
1754 LONGEST
1755 target_read_until_error (struct target_ops *ops,
1756 enum target_object object,
1757 const char *annex, gdb_byte *buf,
1758 ULONGEST offset, LONGEST len)
1759 {
1760 LONGEST xfered = 0;
1761
1762 while (xfered < len)
1763 {
1764 LONGEST xfer = target_read_partial (ops, object, annex,
1765 (gdb_byte *) buf + xfered,
1766 offset + xfered, len - xfered);
1767
1768 /* Call an observer, notifying them of the xfer progress? */
1769 if (xfer == 0)
1770 return xfered;
1771 if (xfer < 0)
1772 {
1773 /* We've got an error. Try to read in smaller blocks. */
1774 ULONGEST start = offset + xfered;
1775 ULONGEST remaining = len - xfered;
1776 ULONGEST half;
1777
1778 /* If an attempt was made to read a random memory address,
1779 it's likely that the very first byte is not accessible.
1780 Try reading the first byte, to avoid doing log N tries
1781 below. */
1782 xfer = target_read_partial (ops, object, annex,
1783 (gdb_byte *) buf + xfered, start, 1);
1784 if (xfer <= 0)
1785 return xfered;
1786 start += 1;
1787 remaining -= 1;
1788 half = remaining/2;
1789
1790 while (half > 0)
1791 {
1792 xfer = target_read_partial (ops, object, annex,
1793 (gdb_byte *) buf + xfered,
1794 start, half);
1795 if (xfer == 0)
1796 return xfered;
1797 if (xfer < 0)
1798 {
1799 remaining = half;
1800 }
1801 else
1802 {
1803 /* We have successfully read the first half. So, the
1804 error must be in the second half. Adjust start and
1805 remaining to point at the second half. */
1806 xfered += xfer;
1807 start += xfer;
1808 remaining -= xfer;
1809 }
1810 half = remaining/2;
1811 }
1812
1813 return xfered;
1814 }
1815 xfered += xfer;
1816 QUIT;
1817 }
1818 return len;
1819 }
1820
1821 /* An alternative to target_write with progress callbacks. */
1822
1823 LONGEST
1824 target_write_with_progress (struct target_ops *ops,
1825 enum target_object object,
1826 const char *annex, const gdb_byte *buf,
1827 ULONGEST offset, LONGEST len,
1828 void (*progress) (ULONGEST, void *), void *baton)
1829 {
1830 LONGEST xfered = 0;
1831
1832 /* Give the progress callback a chance to set up. */
1833 if (progress)
1834 (*progress) (0, baton);
1835
1836 while (xfered < len)
1837 {
1838 LONGEST xfer = target_write_partial (ops, object, annex,
1839 (gdb_byte *) buf + xfered,
1840 offset + xfered, len - xfered);
1841
1842 if (xfer == 0)
1843 return xfered;
1844 if (xfer < 0)
1845 return -1;
1846
1847 if (progress)
1848 (*progress) (xfer, baton);
1849
1850 xfered += xfer;
1851 QUIT;
1852 }
1853 return len;
1854 }
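/* Illustrative sketch (not part of the original source): a progress
   callback of the shape expected by target_write_with_progress.  The
   "demo" names are hypothetical.  */
#if 0
static void
demo_progress (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
}

static LONGEST
demo_write (const gdb_byte *buf, ULONGEST addr, LONGEST len)
{
  ULONGEST total = 0;

  return target_write_with_progress (current_target.beneath,
				     TARGET_OBJECT_MEMORY, NULL, buf,
				     addr, len, demo_progress, &total);
}
#endif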
1855
1856 /* For docs on target_write see target.h. */
1857
1858 LONGEST
1859 target_write (struct target_ops *ops,
1860 enum target_object object,
1861 const char *annex, const gdb_byte *buf,
1862 ULONGEST offset, LONGEST len)
1863 {
1864 return target_write_with_progress (ops, object, annex, buf, offset, len,
1865 NULL, NULL);
1866 }
1867
1868 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1869 the size of the transferred data. PADDING additional bytes are
1870 available in *BUF_P. This is a helper function for
1871 target_read_alloc; see the declaration of that function for more
1872 information. */
1873
1874 static LONGEST
1875 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1876 const char *annex, gdb_byte **buf_p, int padding)
1877 {
1878 size_t buf_alloc, buf_pos;
1879 gdb_byte *buf;
1880 LONGEST n;
1881
1882 /* This function does not have a length parameter; it reads the
1883 entire OBJECT. Also, it doesn't support objects fetched partly
1884 from one target and partly from another (in a different stratum,
1885 e.g. a core file and an executable). Both reasons make it
1886 unsuitable for reading memory. */
1887 gdb_assert (object != TARGET_OBJECT_MEMORY);
1888
1889 /* Start by reading up to 4K at a time. The target will throttle
1890 this number down if necessary. */
1891 buf_alloc = 4096;
1892 buf = xmalloc (buf_alloc);
1893 buf_pos = 0;
1894 while (1)
1895 {
1896 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1897 buf_pos, buf_alloc - buf_pos - padding);
1898 if (n < 0)
1899 {
1900 /* An error occurred. */
1901 xfree (buf);
1902 return -1;
1903 }
1904 else if (n == 0)
1905 {
1906 /* Read all there was. */
1907 if (buf_pos == 0)
1908 xfree (buf);
1909 else
1910 *buf_p = buf;
1911 return buf_pos;
1912 }
1913
1914 buf_pos += n;
1915
1916 /* If the buffer is filling up, expand it. */
1917 if (buf_alloc < buf_pos * 2)
1918 {
1919 buf_alloc *= 2;
1920 buf = xrealloc (buf, buf_alloc);
1921 }
1922
1923 QUIT;
1924 }
1925 }
1926
1927 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1928 the size of the transferred data. See the declaration in "target.h"
1929 for more information about the return value. */
1930
1931 LONGEST
1932 target_read_alloc (struct target_ops *ops, enum target_object object,
1933 const char *annex, gdb_byte **buf_p)
1934 {
1935 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1936 }
1937
1938 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1939 returned as a string, allocated using xmalloc. If an error occurs
1940 or the transfer is unsupported, NULL is returned. Empty objects
1941 are returned as allocated but empty strings. A warning is issued
1942 if the result contains any embedded NUL bytes. */
1943
1944 char *
1945 target_read_stralloc (struct target_ops *ops, enum target_object object,
1946 const char *annex)
1947 {
1948 gdb_byte *buffer;
1949 LONGEST transferred;
1950
1951 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1952
1953 if (transferred < 0)
1954 return NULL;
1955
1956 if (transferred == 0)
1957 return xstrdup ("");
1958
1959 buffer[transferred] = 0;
1960 if (strlen (buffer) < transferred)
1961 warning (_("target object %d, annex %s, "
1962 "contained unexpected null characters"),
1963 (int) object, annex ? annex : "(none)");
1964
1965 return (char *) buffer;
1966 }
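
/* Illustrative sketch, not part of the checked-in sources: fetching
   the XML target description the way target-descriptions.c does,
   through the "target.xml" annex.  A real caller would parse the
   description before releasing the buffer.

   char *xml = target_read_stralloc (&current_target,
                                     TARGET_OBJECT_AVAILABLE_FEATURES,
                                     "target.xml");

   if (xml != NULL)
     xfree (xml);  */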
1967
1968 /* Memory transfer methods. */
1969
1970 void
1971 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1972 LONGEST len)
1973 {
1974 /* This method is used to read from an alternate, non-current
1975 target. This read must bypass the overlay support (as symbols
1976 don't match this target), and GDB's internal cache (wrong cache
1977 for this target). */
1978 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1979 != len)
1980 memory_error (EIO, addr);
1981 }
1982
1983 ULONGEST
1984 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1985 int len, enum bfd_endian byte_order)
1986 {
1987 gdb_byte buf[sizeof (ULONGEST)];
1988
1989 gdb_assert (len <= sizeof (buf));
1990 get_target_memory (ops, addr, buf, len);
1991 return extract_unsigned_integer (buf, len, byte_order);
1992 }
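
/* Illustrative sketch, not part of the checked-in sources: reading a
   4-byte big-endian value from an alternate target, bypassing
   overlays and the dcache as described above.  OPS and ADDR are
   hypothetical.

   ULONGEST word = get_target_memory_unsigned (ops, addr, 4,
                                               BFD_ENDIAN_BIG);  */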
1993
1994 int
1995 target_insert_breakpoint (struct gdbarch *gdbarch,
1996 struct bp_target_info *bp_tgt)
1997 {
1998 if (!may_insert_breakpoints)
1999 {
2000 warning (_("May not insert breakpoints"));
2001 return 1;
2002 }
2003
2004 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2005 }
2006
2007 int
2008 target_remove_breakpoint (struct gdbarch *gdbarch,
2009 struct bp_target_info *bp_tgt)
2010 {
2011 /* This is kind of a weird case to handle, but the permission might
2012 have been changed after breakpoints were inserted - in which case
2013 we should just take the user literally and assume that any
2014 breakpoints should be left in place. */
2015 if (!may_insert_breakpoints)
2016 {
2017 warning (_("May not remove breakpoints"));
2018 return 1;
2019 }
2020
2021 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2022 }
2023
2024 static void
2025 target_info (char *args, int from_tty)
2026 {
2027 struct target_ops *t;
2028 int has_all_mem = 0;
2029
2030 if (symfile_objfile != NULL)
2031 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2032
2033 for (t = target_stack; t != NULL; t = t->beneath)
2034 {
2035 if (!(*t->to_has_memory) (t))
2036 continue;
2037
2038 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2039 continue;
2040 if (has_all_mem)
2041 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
2042 printf_unfiltered ("%s:\n", t->to_longname);
2043 (t->to_files_info) (t);
2044 has_all_mem = (*t->to_has_all_memory) (t);
2045 }
2046 }
2047
2048 /* This function is called before any new inferior is created, e.g.
2049 by running a program, attaching, or connecting to a target.
2050 It cleans up any state from previous invocations which might
2051 change between runs. This is a subset of what target_preopen
2052 resets (things which might change between targets). */
2053
2054 void
2055 target_pre_inferior (int from_tty)
2056 {
2057 /* Clear out solib state. Otherwise the solib state of the previous
2058 inferior might have survived and is entirely wrong for the new
2059 target. This has been observed on GNU/Linux using glibc 2.3. How
2060 to reproduce:
2061
2062 bash$ ./foo&
2063 [1] 4711
2064 bash$ ./foo&
2065 [2] 4712
2066 bash$ gdb ./foo
2067 [...]
2068 (gdb) attach 4711
2069 (gdb) detach
2070 (gdb) attach 4712
2071 Cannot access memory at address 0xdeadbeef
2072 */
2073
2074 /* In some OSs, the shared library list is the same/global/shared
2075 across inferiors. If code is shared between processes, so are
2076 memory regions and features. */
2077 if (!gdbarch_has_global_solist (target_gdbarch))
2078 {
2079 no_shared_libraries (NULL, from_tty);
2080
2081 invalidate_target_mem_regions ();
2082
2083 target_clear_description ();
2084 }
2085 }
2086
2087 /* Callback for iterate_over_inferiors. Gets rid of the given
2088 inferior. */
2089
2090 static int
2091 dispose_inferior (struct inferior *inf, void *args)
2092 {
2093 struct thread_info *thread;
2094
2095 thread = any_thread_of_process (inf->pid);
2096 if (thread)
2097 {
2098 switch_to_thread (thread->ptid);
2099
2100 /* Core inferiors actually should be detached, not killed. */
2101 if (target_has_execution)
2102 target_kill ();
2103 else
2104 target_detach (NULL, 0);
2105 }
2106
2107 return 0;
2108 }
2109
2110 /* This is to be called by the open routine before it does
2111 anything. */
2112
2113 void
2114 target_preopen (int from_tty)
2115 {
2116 dont_repeat ();
2117
2118 if (have_inferiors ())
2119 {
2120 if (!from_tty
2121 || !have_live_inferiors ()
2122 || query (_("A program is being debugged already. Kill it? ")))
2123 iterate_over_inferiors (dispose_inferior, NULL);
2124 else
2125 error (_("Program not killed."));
2126 }
2127
2128 /* Calling target_kill may remove the target from the stack. But if
2129 it doesn't (which seems like a win for UDI), remove it now. */
2130 /* Leave the exec target, though. The user may be switching from a
2131 live process to a core of the same program. */
2132 pop_all_targets_above (file_stratum, 0);
2133
2134 target_pre_inferior (from_tty);
2135 }
2136
2137 /* Detach a target after doing deferred register stores. */
2138
2139 void
2140 target_detach (char *args, int from_tty)
2141 {
2142 struct target_ops* t;
2143
2144 if (gdbarch_has_global_breakpoints (target_gdbarch))
2145 /* Don't remove global breakpoints here. They're removed on
2146 disconnection from the target. */
2147 ;
2148 else
2149 /* If we're in breakpoints-always-inserted mode, have to remove
2150 them before detaching. */
2151 remove_breakpoints_pid (PIDGET (inferior_ptid));
2152
2153 prepare_for_detach ();
2154
2155 for (t = current_target.beneath; t != NULL; t = t->beneath)
2156 {
2157 if (t->to_detach != NULL)
2158 {
2159 t->to_detach (t, args, from_tty);
2160 if (targetdebug)
2161 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2162 args, from_tty);
2163 return;
2164 }
2165 }
2166
2167 internal_error (__FILE__, __LINE__, "could not find a target to detach");
2168 }
2169
2170 void
2171 target_disconnect (char *args, int from_tty)
2172 {
2173 struct target_ops *t;
2174
2175 /* If we're in breakpoints-always-inserted mode or if breakpoints
2176 are global across processes, we have to remove them before
2177 disconnecting. */
2178 remove_breakpoints ();
2179
2180 for (t = current_target.beneath; t != NULL; t = t->beneath)
2181 if (t->to_disconnect != NULL)
2182 {
2183 if (targetdebug)
2184 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2185 args, from_tty);
2186 t->to_disconnect (t, args, from_tty);
2187 return;
2188 }
2189
2190 tcomplain ();
2191 }
2192
2193 ptid_t
2194 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2195 {
2196 struct target_ops *t;
2197
2198 for (t = current_target.beneath; t != NULL; t = t->beneath)
2199 {
2200 if (t->to_wait != NULL)
2201 {
2202 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2203
2204 if (targetdebug)
2205 {
2206 char *status_string;
2207
2208 status_string = target_waitstatus_to_string (status);
2209 fprintf_unfiltered (gdb_stdlog,
2210 "target_wait (%d, status) = %d, %s\n",
2211 PIDGET (ptid), PIDGET (retval),
2212 status_string);
2213 xfree (status_string);
2214 }
2215
2216 return retval;
2217 }
2218 }
2219
2220 noprocess ();
2221 }
2222
2223 char *
2224 target_pid_to_str (ptid_t ptid)
2225 {
2226 struct target_ops *t;
2227
2228 for (t = current_target.beneath; t != NULL; t = t->beneath)
2229 {
2230 if (t->to_pid_to_str != NULL)
2231 return (*t->to_pid_to_str) (t, ptid);
2232 }
2233
2234 return normal_pid_to_str (ptid);
2235 }
2236
2237 void
2238 target_resume (ptid_t ptid, int step, enum target_signal signal)
2239 {
2240 struct target_ops *t;
2241
2242 target_dcache_invalidate ();
2243
2244 for (t = current_target.beneath; t != NULL; t = t->beneath)
2245 {
2246 if (t->to_resume != NULL)
2247 {
2248 t->to_resume (t, ptid, step, signal);
2249 if (targetdebug)
2250 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2251 PIDGET (ptid),
2252 step ? "step" : "continue",
2253 target_signal_to_name (signal));
2254
2255 registers_changed_ptid (ptid);
2256 set_executing (ptid, 1);
2257 set_running (ptid, 1);
2258 clear_inline_frame_state (ptid);
2259 return;
2260 }
2261 }
2262
2263 noprocess ();
2264 }
2265 /* Look through the list of possible targets for a target that can
2266 follow forks. */
2267
2268 int
2269 target_follow_fork (int follow_child)
2270 {
2271 struct target_ops *t;
2272
2273 for (t = current_target.beneath; t != NULL; t = t->beneath)
2274 {
2275 if (t->to_follow_fork != NULL)
2276 {
2277 int retval = t->to_follow_fork (t, follow_child);
2278
2279 if (targetdebug)
2280 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2281 follow_child, retval);
2282 return retval;
2283 }
2284 }
2285
2286 /* Some target returned a fork event, but did not know how to follow it. */
2287 internal_error (__FILE__, __LINE__,
2288 "could not find a target to follow fork");
2289 }
2290
2291 void
2292 target_mourn_inferior (void)
2293 {
2294 struct target_ops *t;
2295
2296 for (t = current_target.beneath; t != NULL; t = t->beneath)
2297 {
2298 if (t->to_mourn_inferior != NULL)
2299 {
2300 t->to_mourn_inferior (t);
2301 if (targetdebug)
2302 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2303
2304 /* We no longer need to keep handles on any of the object files.
2305 Make sure to release them to avoid unnecessarily locking any
2306 of them while we're not actually debugging. */
2307 bfd_cache_close_all ();
2308
2309 return;
2310 }
2311 }
2312
2313 internal_error (__FILE__, __LINE__,
2314 "could not find a target to follow mourn inferior");
2315 }
2316
2317 /* Look for a target which can describe architectural features, starting
2318 from TARGET. If we find one, return its description. */
2319
2320 const struct target_desc *
2321 target_read_description (struct target_ops *target)
2322 {
2323 struct target_ops *t;
2324
2325 for (t = target; t != NULL; t = t->beneath)
2326 if (t->to_read_description != NULL)
2327 {
2328 const struct target_desc *tdesc;
2329
2330 tdesc = t->to_read_description (t);
2331 if (tdesc)
2332 return tdesc;
2333 }
2334
2335 return NULL;
2336 }
2337
2338 /* The default implementation of to_search_memory.
2339 This implements a basic search of memory, reading target memory and
2340 performing the search here (as opposed to performing the search on the
2341 target side with, for example, gdbserver). */
2342
2343 int
2344 simple_search_memory (struct target_ops *ops,
2345 CORE_ADDR start_addr, ULONGEST search_space_len,
2346 const gdb_byte *pattern, ULONGEST pattern_len,
2347 CORE_ADDR *found_addrp)
2348 {
2349 /* NOTE: also defined in find.c testcase. */
2350 #define SEARCH_CHUNK_SIZE 16000
2351 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2352 /* Buffer to hold memory contents for searching. */
2353 gdb_byte *search_buf;
2354 unsigned search_buf_size;
2355 struct cleanup *old_cleanups;
2356
2357 search_buf_size = chunk_size + pattern_len - 1;
2358
2359 /* No point in trying to allocate a buffer larger than the search space. */
2360 if (search_space_len < search_buf_size)
2361 search_buf_size = search_space_len;
2362
2363 search_buf = malloc (search_buf_size);
2364 if (search_buf == NULL)
2365 error (_("Unable to allocate memory to perform the search."));
2366 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2367
2368 /* Prime the search buffer. */
2369
2370 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2371 search_buf, start_addr, search_buf_size) != search_buf_size)
2372 {
2373 warning (_("Unable to access target memory at %s, halting search."),
2374 hex_string (start_addr));
2375 do_cleanups (old_cleanups);
2376 return -1;
2377 }
2378
2379 /* Perform the search.
2380
2381 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2382 When we've scanned N bytes we copy the trailing bytes to the start and
2383 read in another N bytes. */
2384
2385 while (search_space_len >= pattern_len)
2386 {
2387 gdb_byte *found_ptr;
2388 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2389
2390 found_ptr = memmem (search_buf, nr_search_bytes,
2391 pattern, pattern_len);
2392
2393 if (found_ptr != NULL)
2394 {
2395 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2396
2397 *found_addrp = found_addr;
2398 do_cleanups (old_cleanups);
2399 return 1;
2400 }
2401
2402 /* Not found in this chunk, skip to next chunk. */
2403
2404 /* Don't let search_space_len wrap here, it's unsigned. */
2405 if (search_space_len >= chunk_size)
2406 search_space_len -= chunk_size;
2407 else
2408 search_space_len = 0;
2409
2410 if (search_space_len >= pattern_len)
2411 {
2412 unsigned keep_len = search_buf_size - chunk_size;
2413 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2414 int nr_to_read;
2415
2416 /* Copy the trailing part of the previous iteration to the front
2417 of the buffer for the next iteration. */
2418 gdb_assert (keep_len == pattern_len - 1);
2419 memcpy (search_buf, search_buf + chunk_size, keep_len);
2420
2421 nr_to_read = min (search_space_len - keep_len, chunk_size);
2422
2423 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2424 search_buf + keep_len, read_addr,
2425 nr_to_read) != nr_to_read)
2426 {
2427 warning (_("Unable to access target memory at %s, halting search."),
2428 hex_string (read_addr));
2429 do_cleanups (old_cleanups);
2430 return -1;
2431 }
2432
2433 start_addr += chunk_size;
2434 }
2435 }
2436
2437 /* Not found. */
2438
2439 do_cleanups (old_cleanups);
2440 return 0;
2441 }
2442
2443 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2444 sequence of bytes in PATTERN with length PATTERN_LEN.
2445
2446 The result is 1 if found, 0 if not found, and -1 if there was an error
2447 requiring halting of the search (e.g. memory read error).
2448 If the pattern is found the address is recorded in FOUND_ADDRP. */
2449
2450 int
2451 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2452 const gdb_byte *pattern, ULONGEST pattern_len,
2453 CORE_ADDR *found_addrp)
2454 {
2455 struct target_ops *t;
2456 int found;
2457
2458 /* We don't use INHERIT to set current_target.to_search_memory,
2459 so we have to scan the target stack and handle targetdebug
2460 ourselves. */
2461
2462 if (targetdebug)
2463 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2464 hex_string (start_addr));
2465
2466 for (t = current_target.beneath; t != NULL; t = t->beneath)
2467 if (t->to_search_memory != NULL)
2468 break;
2469
2470 if (t != NULL)
2471 {
2472 found = t->to_search_memory (t, start_addr, search_space_len,
2473 pattern, pattern_len, found_addrp);
2474 }
2475 else
2476 {
2477 /* If a special version of to_search_memory isn't available, use the
2478 simple version. */
2479 found = simple_search_memory (current_target.beneath,
2480 start_addr, search_space_len,
2481 pattern, pattern_len, found_addrp);
2482 }
2483
2484 if (targetdebug)
2485 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2486
2487 return found;
2488 }
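
/* Illustrative sketch, not part of the checked-in sources: searching
   the first 4 KiB at START (hypothetical) for a two-byte pattern.
   The 1/0/-1 return convention is documented above.

   static const gdb_byte pattern[] = { 0xde, 0xad };
   CORE_ADDR found;

   if (target_search_memory (start, 4096, pattern, sizeof (pattern),
                             &found) == 1)
     printf_filtered (_("pattern found at %s\n"), hex_string (found));  */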
2489
2490 /* Look through the currently pushed targets. If none of them will
2491 be able to restart the currently running process, issue an error
2492 message. */
2493
2494 void
2495 target_require_runnable (void)
2496 {
2497 struct target_ops *t;
2498
2499 for (t = target_stack; t != NULL; t = t->beneath)
2500 {
2501 /* If this target knows how to create a new program, then
2502 assume we will still be able to after killing the current
2503 one. Either killing and mourning will not pop T, or else
2504 find_default_run_target will find it again. */
2505 if (t->to_create_inferior != NULL)
2506 return;
2507
2508 /* Do not worry about thread_stratum targets that can not
2509 create inferiors. Assume they will be pushed again if
2510 necessary, and continue to the process_stratum. */
2511 if (t->to_stratum == thread_stratum
2512 || t->to_stratum == arch_stratum)
2513 continue;
2514
2515 error (_("\
2516 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2517 t->to_shortname);
2518 }
2519
2520 /* This function is only called if the target is running. In that
2521 case there should have been a process_stratum target and it
2522 should either know how to create inferiors, or not... */
2523 internal_error (__FILE__, __LINE__, "No targets found");
2524 }
2525
2526 /* Look through the list of possible targets for a target that can
2527 execute a run or attach command without any other data. This is
2528 used to locate the default process stratum.
2529
2530 If DO_MESG is not NULL, the result is always valid (error() is
2531 called for errors); else, return NULL on error. */
2532
2533 static struct target_ops *
2534 find_default_run_target (char *do_mesg)
2535 {
2536 struct target_ops **t;
2537 struct target_ops *runable = NULL;
2538 int count;
2539
2540 count = 0;
2541
2542 for (t = target_structs; t < target_structs + target_struct_size;
2543 ++t)
2544 {
2545 if ((*t)->to_can_run && target_can_run (*t))
2546 {
2547 runable = *t;
2548 ++count;
2549 }
2550 }
2551
2552 if (count != 1)
2553 {
2554 if (do_mesg)
2555 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2556 else
2557 return NULL;
2558 }
2559
2560 return runable;
2561 }
2562
2563 void
2564 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2565 {
2566 struct target_ops *t;
2567
2568 t = find_default_run_target ("attach");
2569 (t->to_attach) (t, args, from_tty);
2570 return;
2571 }
2572
2573 void
2574 find_default_create_inferior (struct target_ops *ops,
2575 char *exec_file, char *allargs, char **env,
2576 int from_tty)
2577 {
2578 struct target_ops *t;
2579
2580 t = find_default_run_target ("run");
2581 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2582 return;
2583 }
2584
2585 static int
2586 find_default_can_async_p (void)
2587 {
2588 struct target_ops *t;
2589
2590 /* This may be called before the target is pushed on the stack;
2591 look for the default process stratum. If there's none, gdb isn't
2592 configured with a native debugger, and target remote isn't
2593 connected yet. */
2594 t = find_default_run_target (NULL);
2595 if (t && t->to_can_async_p)
2596 return (t->to_can_async_p) ();
2597 return 0;
2598 }
2599
2600 static int
2601 find_default_is_async_p (void)
2602 {
2603 struct target_ops *t;
2604
2605 /* This may be called before the target is pushed on the stack;
2606 look for the default process stratum. If there's none, gdb isn't
2607 configured with a native debugger, and target remote isn't
2608 connected yet. */
2609 t = find_default_run_target (NULL);
2610 if (t && t->to_is_async_p)
2611 return (t->to_is_async_p) ();
2612 return 0;
2613 }
2614
2615 static int
2616 find_default_supports_non_stop (void)
2617 {
2618 struct target_ops *t;
2619
2620 t = find_default_run_target (NULL);
2621 if (t && t->to_supports_non_stop)
2622 return (t->to_supports_non_stop) ();
2623 return 0;
2624 }
2625
2626 int
2627 target_supports_non_stop (void)
2628 {
2629 struct target_ops *t;
2630
2631 for (t = &current_target; t != NULL; t = t->beneath)
2632 if (t->to_supports_non_stop)
2633 return t->to_supports_non_stop ();
2634
2635 return 0;
2636 }
2637
2638
2639 char *
2640 target_get_osdata (const char *type)
2641 {
2642 struct target_ops *t;
2643
2644 /* If we're already connected to something that can get us OS
2645 related data, use it. Otherwise, try using the native
2646 target. */
2647 if (current_target.to_stratum >= process_stratum)
2648 t = current_target.beneath;
2649 else
2650 t = find_default_run_target ("get OS data");
2651
2652 if (!t)
2653 return NULL;
2654
2655 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2656 }
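
/* Illustrative sketch, not part of the checked-in sources: the
   "info os processes" command effectively boils down to a call like
   the following, with the returned XML parsed and freed by the
   caller.

   char *xml = target_get_osdata ("processes");

   if (xml == NULL)
     error (_("Can't read OS data from this target."));  */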
2657
2658 /* Determine the current address space of thread PTID. */
2659
2660 struct address_space *
2661 target_thread_address_space (ptid_t ptid)
2662 {
2663 struct address_space *aspace;
2664 struct inferior *inf;
2665 struct target_ops *t;
2666
2667 for (t = current_target.beneath; t != NULL; t = t->beneath)
2668 {
2669 if (t->to_thread_address_space != NULL)
2670 {
2671 aspace = t->to_thread_address_space (t, ptid);
2672 gdb_assert (aspace);
2673
2674 if (targetdebug)
2675 fprintf_unfiltered (gdb_stdlog,
2676 "target_thread_address_space (%s) = %d\n",
2677 target_pid_to_str (ptid),
2678 address_space_num (aspace));
2679 return aspace;
2680 }
2681 }
2682
2683 /* Fall-back to the "main" address space of the inferior. */
2684 inf = find_inferior_pid (ptid_get_pid (ptid));
2685
2686 if (inf == NULL || inf->aspace == NULL)
2687 internal_error (__FILE__, __LINE__, "\
2688 Can't determine the current address space of thread %s\n",
2689 target_pid_to_str (ptid));
2690
2691 return inf->aspace;
2692 }
2693
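/* By default, a region is acceptable for a hardware watchpoint only
   if it is no wider than a pointer on the target: with a 32-bit
   gdbarch_ptr_bit this allows LEN of up to 4 bytes, and with a 64-bit
   one up to 8.  */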
2694 static int
2695 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2696 {
2697 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2698 }
2699
2700 static int
2701 default_watchpoint_addr_within_range (struct target_ops *target,
2702 CORE_ADDR addr,
2703 CORE_ADDR start, int length)
2704 {
2705 return addr >= start && addr < start + length;
2706 }
2707
2708 static struct gdbarch *
2709 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2710 {
2711 return target_gdbarch;
2712 }
2713
2714 static int
2715 return_zero (void)
2716 {
2717 return 0;
2718 }
2719
2720 static int
2721 return_one (void)
2722 {
2723 return 1;
2724 }
2725
2726 static int
2727 return_minus_one (void)
2728 {
2729 return -1;
2730 }
2731
2732 /* Find a single runnable target in the list of registered targets and
2733 return it. If for some reason there is more than one, return NULL. */
2734
2735 struct target_ops *
2736 find_run_target (void)
2737 {
2738 struct target_ops **t;
2739 struct target_ops *runable = NULL;
2740 int count;
2741
2742 count = 0;
2743
2744 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2745 {
2746 if ((*t)->to_can_run && target_can_run (*t))
2747 {
2748 runable = *t;
2749 ++count;
2750 }
2751 }
2752
2753 return (count == 1 ? runable : NULL);
2754 }
2755
2756 /* Find a single core_stratum target in the list of targets and return it.
2757 If for some reason there is more than one, return NULL. */
2758
2759 struct target_ops *
2760 find_core_target (void)
2761 {
2762 struct target_ops **t;
2763 struct target_ops *runable = NULL;
2764 int count;
2765
2766 count = 0;
2767
2768 for (t = target_structs; t < target_structs + target_struct_size;
2769 ++t)
2770 {
2771 if ((*t)->to_stratum == core_stratum)
2772 {
2773 runable = *t;
2774 ++count;
2775 }
2776 }
2777
2778 return (count == 1 ? runable : NULL);
2779 }
2780
2781 /*
2782 * Find the next target down the stack from the specified target.
2783 */
2784
2785 struct target_ops *
2786 find_target_beneath (struct target_ops *t)
2787 {
2788 return t->beneath;
2789 }
2790
2791 \f
2792 /* The inferior process has died. Long live the inferior! */
2793
2794 void
2795 generic_mourn_inferior (void)
2796 {
2797 ptid_t ptid;
2798
2799 ptid = inferior_ptid;
2800 inferior_ptid = null_ptid;
2801
2802 if (!ptid_equal (ptid, null_ptid))
2803 {
2804 int pid = ptid_get_pid (ptid);
2805 exit_inferior (pid);
2806 }
2807
2808 breakpoint_init_inferior (inf_exited);
2809 registers_changed ();
2810
2811 reopen_exec_file ();
2812 reinit_frame_cache ();
2813
2814 if (deprecated_detach_hook)
2815 deprecated_detach_hook ();
2816 }
2817 \f
2818 /* Helper function for child_wait and the derivatives of child_wait.
2819 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2820 translation of that in OURSTATUS. */
2821 void
2822 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2823 {
2824 if (WIFEXITED (hoststatus))
2825 {
2826 ourstatus->kind = TARGET_WAITKIND_EXITED;
2827 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2828 }
2829 else if (!WIFSTOPPED (hoststatus))
2830 {
2831 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2832 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2833 }
2834 else
2835 {
2836 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2837 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2838 }
2839 }
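
/* Illustrative sketch, not part of the checked-in sources: a native
   target's wait loop typically feeds the raw status from waitpid
   straight into store_waitstatus.  PID and OURSTATUS are
   hypothetical.

   int hoststatus;

   pid = waitpid (pid, &hoststatus, 0);
   store_waitstatus (ourstatus, hoststatus);  */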
2840 \f
2841 /* Convert a normal process ID to a string. Returns the string in a
2842 static buffer. */
2843
2844 char *
2845 normal_pid_to_str (ptid_t ptid)
2846 {
2847 static char buf[32];
2848
2849 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2850 return buf;
2851 }
2852
2853 static char *
2854 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
2855 {
2856 return normal_pid_to_str (ptid);
2857 }
2858
2859 /* Error-catcher for target_find_memory_regions. */
2860 static int
2861 dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2862 {
2863 error (_("Command not implemented for this target."));
2864 return 0;
2865 }
2866
2867 /* Error-catcher for target_make_corefile_notes. */
2868 static char *
2869 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2870 {
2871 error (_("Command not implemented for this target."));
2872 return NULL;
2873 }
2874
2875 /* Error-catcher for target_get_bookmark. */
2876 static gdb_byte *
2877 dummy_get_bookmark (char *ignore1, int ignore2)
2878 {
2879 tcomplain ();
2880 return NULL;
2881 }
2882
2883 /* Error-catcher for target_goto_bookmark. */
2884 static void
2885 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
2886 {
2887 tcomplain ();
2888 }
2889
2890 /* Set up the handful of non-empty slots needed by the dummy target
2891 vector. */
2892
2893 static void
2894 init_dummy_target (void)
2895 {
2896 dummy_target.to_shortname = "None";
2897 dummy_target.to_longname = "None";
2898 dummy_target.to_doc = "";
2899 dummy_target.to_attach = find_default_attach;
2900 dummy_target.to_detach =
2901 (void (*)(struct target_ops *, char *, int))target_ignore;
2902 dummy_target.to_create_inferior = find_default_create_inferior;
2903 dummy_target.to_can_async_p = find_default_can_async_p;
2904 dummy_target.to_is_async_p = find_default_is_async_p;
2905 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2906 dummy_target.to_pid_to_str = dummy_pid_to_str;
2907 dummy_target.to_stratum = dummy_stratum;
2908 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2909 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2910 dummy_target.to_get_bookmark = dummy_get_bookmark;
2911 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
2912 dummy_target.to_xfer_partial = default_xfer_partial;
2913 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
2914 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
2915 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
2916 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
2917 dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
2918 dummy_target.to_stopped_by_watchpoint = return_zero;
2919 dummy_target.to_stopped_data_address =
2920 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
2921 dummy_target.to_magic = OPS_MAGIC;
2922 }
2923 \f
2924 static void
2925 debug_to_open (char *args, int from_tty)
2926 {
2927 debug_target.to_open (args, from_tty);
2928
2929 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2930 }
2931
2932 void
2933 target_close (struct target_ops *targ, int quitting)
2934 {
2935 if (targ->to_xclose != NULL)
2936 targ->to_xclose (targ, quitting);
2937 else if (targ->to_close != NULL)
2938 targ->to_close (quitting);
2939
2940 if (targetdebug)
2941 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2942 }
2943
2944 void
2945 target_attach (char *args, int from_tty)
2946 {
2947 struct target_ops *t;
2948
2949 for (t = current_target.beneath; t != NULL; t = t->beneath)
2950 {
2951 if (t->to_attach != NULL)
2952 {
2953 t->to_attach (t, args, from_tty);
2954 if (targetdebug)
2955 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
2956 args, from_tty);
2957 return;
2958 }
2959 }
2960
2961 internal_error (__FILE__, __LINE__,
2962 "could not find a target to attach");
2963 }
2964
2965 int
2966 target_thread_alive (ptid_t ptid)
2967 {
2968 struct target_ops *t;
2969
2970 for (t = current_target.beneath; t != NULL; t = t->beneath)
2971 {
2972 if (t->to_thread_alive != NULL)
2973 {
2974 int retval;
2975
2976 retval = t->to_thread_alive (t, ptid);
2977 if (targetdebug)
2978 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
2979 PIDGET (ptid), retval);
2980
2981 return retval;
2982 }
2983 }
2984
2985 return 0;
2986 }
2987
2988 void
2989 target_find_new_threads (void)
2990 {
2991 struct target_ops *t;
2992
2993 for (t = current_target.beneath; t != NULL; t = t->beneath)
2994 {
2995 if (t->to_find_new_threads != NULL)
2996 {
2997 t->to_find_new_threads (t);
2998 if (targetdebug)
2999 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3000
3001 return;
3002 }
3003 }
3004 }
3005
3006 void
3007 target_stop (ptid_t ptid)
3008 {
3009 if (!may_stop)
3010 {
3011 warning (_("May not interrupt or stop the target, ignoring attempt"));
3012 return;
3013 }
3014
3015 (*current_target.to_stop) (ptid);
3016 }
3017
3018 static void
3019 debug_to_post_attach (int pid)
3020 {
3021 debug_target.to_post_attach (pid);
3022
3023 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3024 }
3025
3026 /* Return a pretty printed form of target_waitstatus.
3027 Space for the result is malloc'd, caller must free. */
3028
3029 char *
3030 target_waitstatus_to_string (const struct target_waitstatus *ws)
3031 {
3032 const char *kind_str = "status->kind = ";
3033
3034 switch (ws->kind)
3035 {
3036 case TARGET_WAITKIND_EXITED:
3037 return xstrprintf ("%sexited, status = %d",
3038 kind_str, ws->value.integer);
3039 case TARGET_WAITKIND_STOPPED:
3040 return xstrprintf ("%sstopped, signal = %s",
3041 kind_str, target_signal_to_name (ws->value.sig));
3042 case TARGET_WAITKIND_SIGNALLED:
3043 return xstrprintf ("%ssignalled, signal = %s",
3044 kind_str, target_signal_to_name (ws->value.sig));
3045 case TARGET_WAITKIND_LOADED:
3046 return xstrprintf ("%sloaded", kind_str);
3047 case TARGET_WAITKIND_FORKED:
3048 return xstrprintf ("%sforked", kind_str);
3049 case TARGET_WAITKIND_VFORKED:
3050 return xstrprintf ("%svforked", kind_str);
3051 case TARGET_WAITKIND_EXECD:
3052 return xstrprintf ("%sexecd", kind_str);
3053 case TARGET_WAITKIND_SYSCALL_ENTRY:
3054 return xstrprintf ("%sentered syscall", kind_str);
3055 case TARGET_WAITKIND_SYSCALL_RETURN:
3056 return xstrprintf ("%sexited syscall", kind_str);
3057 case TARGET_WAITKIND_SPURIOUS:
3058 return xstrprintf ("%sspurious", kind_str);
3059 case TARGET_WAITKIND_IGNORE:
3060 return xstrprintf ("%signore", kind_str);
3061 case TARGET_WAITKIND_NO_HISTORY:
3062 return xstrprintf ("%sno-history", kind_str);
3063 default:
3064 return xstrprintf ("%sunknown???", kind_str);
3065 }
3066 }
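
/* For example, a SIGTRAP stop is rendered as
   "status->kind = stopped, signal = SIGTRAP" and a normal exit as
   "status->kind = exited, status = 0" (illustrative only).  */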
3067
3068 static void
3069 debug_print_register (const char * func,
3070 struct regcache *regcache, int regno)
3071 {
3072 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3073
3074 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3075 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3076 && gdbarch_register_name (gdbarch, regno) != NULL
3077 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3078 fprintf_unfiltered (gdb_stdlog, "(%s)",
3079 gdbarch_register_name (gdbarch, regno));
3080 else
3081 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3082 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3083 {
3084 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3085 int i, size = register_size (gdbarch, regno);
3086 unsigned char buf[MAX_REGISTER_SIZE];
3087
3088 regcache_raw_collect (regcache, regno, buf);
3089 fprintf_unfiltered (gdb_stdlog, " = ");
3090 for (i = 0; i < size; i++)
3091 {
3092 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3093 }
3094 if (size <= sizeof (LONGEST))
3095 {
3096 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3097
3098 fprintf_unfiltered (gdb_stdlog, " %s %s",
3099 core_addr_to_string_nz (val), plongest (val));
3100 }
3101 }
3102 fprintf_unfiltered (gdb_stdlog, "\n");
3103 }
3104
3105 void
3106 target_fetch_registers (struct regcache *regcache, int regno)
3107 {
3108 struct target_ops *t;
3109
3110 for (t = current_target.beneath; t != NULL; t = t->beneath)
3111 {
3112 if (t->to_fetch_registers != NULL)
3113 {
3114 t->to_fetch_registers (t, regcache, regno);
3115 if (targetdebug)
3116 debug_print_register ("target_fetch_registers", regcache, regno);
3117 return;
3118 }
3119 }
3120 }
3121
3122 void
3123 target_store_registers (struct regcache *regcache, int regno)
3124 {
3125 struct target_ops *t;
3126
3127 if (!may_write_registers)
3128 error (_("Writing to registers is not allowed (regno %d)"), regno);
3129
3130 for (t = current_target.beneath; t != NULL; t = t->beneath)
3131 {
3132 if (t->to_store_registers != NULL)
3133 {
3134 t->to_store_registers (t, regcache, regno);
3135 if (targetdebug)
3136 {
3137 debug_print_register ("target_store_registers", regcache, regno);
3138 }
3139 return;
3140 }
3141 }
3142
3143 noprocess ();
3144 }
3145
3146 int
3147 target_core_of_thread (ptid_t ptid)
3148 {
3149 struct target_ops *t;
3150
3151 for (t = current_target.beneath; t != NULL; t = t->beneath)
3152 {
3153 if (t->to_core_of_thread != NULL)
3154 {
3155 int retval = t->to_core_of_thread (t, ptid);
3156
3157 if (targetdebug)
3158 fprintf_unfiltered (gdb_stdlog, "target_core_of_thread (%d) = %d\n",
3159 PIDGET (ptid), retval);
3160 return retval;
3161 }
3162 }
3163
3164 return -1;
3165 }
3166
3167 int
3168 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3169 {
3170 struct target_ops *t;
3171
3172 for (t = current_target.beneath; t != NULL; t = t->beneath)
3173 {
3174 if (t->to_verify_memory != NULL)
3175 {
3176 int retval = t->to_verify_memory (t, data, memaddr, size);
3177
3178 if (targetdebug)
3179 fprintf_unfiltered (gdb_stdlog, "target_verify_memory (%s, %s) = %d\n",
3180 paddress (target_gdbarch, memaddr),
3181 pulongest (size),
3182 retval);
3183 return retval;
3184 }
3185 }
3186
3187 tcomplain ();
3188 }
3189
3190 static void
3191 debug_to_prepare_to_store (struct regcache *regcache)
3192 {
3193 debug_target.to_prepare_to_store (regcache);
3194
3195 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
3196 }
3197
3198 static int
3199 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
3200 int write, struct mem_attrib *attrib,
3201 struct target_ops *target)
3202 {
3203 int retval;
3204
3205 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
3206 attrib, target);
3207
3208 fprintf_unfiltered (gdb_stdlog,
3209 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
3210 paddress (target_gdbarch, memaddr), len,
3211 write ? "write" : "read", retval);
3212
3213 if (retval > 0)
3214 {
3215 int i;
3216
3217 fputs_unfiltered (", bytes =", gdb_stdlog);
3218 for (i = 0; i < retval; i++)
3219 {
3220 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
3221 {
3222 if (targetdebug < 2 && i > 0)
3223 {
3224 fprintf_unfiltered (gdb_stdlog, " ...");
3225 break;
3226 }
3227 fprintf_unfiltered (gdb_stdlog, "\n");
3228 }
3229
3230 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
3231 }
3232 }
3233
3234 fputc_unfiltered ('\n', gdb_stdlog);
3235
3236 return retval;
3237 }
3238
3239 static void
3240 debug_to_files_info (struct target_ops *target)
3241 {
3242 debug_target.to_files_info (target);
3243
3244 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3245 }
3246
3247 static int
3248 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3249 struct bp_target_info *bp_tgt)
3250 {
3251 int retval;
3252
3253 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3254
3255 fprintf_unfiltered (gdb_stdlog,
3256 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
3257 (unsigned long) bp_tgt->placed_address,
3258 (unsigned long) retval);
3259 return retval;
3260 }
3261
3262 static int
3263 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3264 struct bp_target_info *bp_tgt)
3265 {
3266 int retval;
3267
3268 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3269
3270 fprintf_unfiltered (gdb_stdlog,
3271 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
3272 (unsigned long) bp_tgt->placed_address,
3273 (unsigned long) retval);
3274 return retval;
3275 }
3276
3277 static int
3278 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3279 {
3280 int retval;
3281
3282 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3283
3284 fprintf_unfiltered (gdb_stdlog,
3285 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3286 (unsigned long) type,
3287 (unsigned long) cnt,
3288 (unsigned long) from_tty,
3289 (unsigned long) retval);
3290 return retval;
3291 }
3292
3293 static int
3294 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3295 {
3296 CORE_ADDR retval;
3297
3298 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3299
3300 fprintf_unfiltered (gdb_stdlog,
3301 "target_region_ok_for_hw_watchpoint (%ld, %ld) = 0x%lx\n",
3302 (unsigned long) addr,
3303 (unsigned long) len,
3304 (unsigned long) retval);
3305 return retval;
3306 }
3307
3308 static int
3309 debug_to_stopped_by_watchpoint (void)
3310 {
3311 int retval;
3312
3313 retval = debug_target.to_stopped_by_watchpoint ();
3314
3315 fprintf_unfiltered (gdb_stdlog,
3316 "target_stopped_by_watchpoint () = %ld\n",
3317 (unsigned long) retval);
3318 return retval;
3319 }
3320
3321 static int
3322 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3323 {
3324 int retval;
3325
3326 retval = debug_target.to_stopped_data_address (target, addr);
3327
3328 fprintf_unfiltered (gdb_stdlog,
3329 "target_stopped_data_address ([0x%lx]) = %ld\n",
3330 (unsigned long)*addr,
3331 (unsigned long)retval);
3332 return retval;
3333 }
3334
3335 static int
3336 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3337 CORE_ADDR addr,
3338 CORE_ADDR start, int length)
3339 {
3340 int retval;
3341
3342 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3343 start, length);
3344
3345 fprintf_unfiltered (gdb_stdlog,
3346 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
3347 (unsigned long) addr, (unsigned long) start, length,
3348 retval);
3349 return retval;
3350 }
3351
3352 static int
3353 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3354 struct bp_target_info *bp_tgt)
3355 {
3356 int retval;
3357
3358 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3359
3360 fprintf_unfiltered (gdb_stdlog,
3361 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
3362 (unsigned long) bp_tgt->placed_address,
3363 (unsigned long) retval);
3364 return retval;
3365 }
3366
3367 static int
3368 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3369 struct bp_target_info *bp_tgt)
3370 {
3371 int retval;
3372
3373 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3374
3375 fprintf_unfiltered (gdb_stdlog,
3376 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
3377 (unsigned long) bp_tgt->placed_address,
3378 (unsigned long) retval);
3379 return retval;
3380 }
3381
3382 static int
3383 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
3384 {
3385 int retval;
3386
3387 retval = debug_target.to_insert_watchpoint (addr, len, type);
3388
3389 fprintf_unfiltered (gdb_stdlog,
3390 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
3391 (unsigned long) addr, len, type, (unsigned long) retval);
3392 return retval;
3393 }
3394
3395 static int
3396 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
3397 {
3398 int retval;
3399
3400 retval = debug_target.to_remove_watchpoint (addr, len, type);
3401
3402 fprintf_unfiltered (gdb_stdlog,
3403 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
3404 (unsigned long) addr, len, type, (unsigned long) retval);
3405 return retval;
3406 }
3407
3408 static void
3409 debug_to_terminal_init (void)
3410 {
3411 debug_target.to_terminal_init ();
3412
3413 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
3414 }
3415
3416 static void
3417 debug_to_terminal_inferior (void)
3418 {
3419 debug_target.to_terminal_inferior ();
3420
3421 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
3422 }
3423
3424 static void
3425 debug_to_terminal_ours_for_output (void)
3426 {
3427 debug_target.to_terminal_ours_for_output ();
3428
3429 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
3430 }
3431
3432 static void
3433 debug_to_terminal_ours (void)
3434 {
3435 debug_target.to_terminal_ours ();
3436
3437 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
3438 }
3439
3440 static void
3441 debug_to_terminal_save_ours (void)
3442 {
3443 debug_target.to_terminal_save_ours ();
3444
3445 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
3446 }
3447
3448 static void
3449 debug_to_terminal_info (char *arg, int from_tty)
3450 {
3451 debug_target.to_terminal_info (arg, from_tty);
3452
3453 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
3454 from_tty);
3455 }
3456
3457 static void
3458 debug_to_load (char *args, int from_tty)
3459 {
3460 debug_target.to_load (args, from_tty);
3461
3462 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
3463 }
3464
3465 static int
3466 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
3467 {
3468 int retval;
3469
3470 retval = debug_target.to_lookup_symbol (name, addrp);
3471
3472 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
3473
3474 return retval;
3475 }
3476
3477 static void
3478 debug_to_post_startup_inferior (ptid_t ptid)
3479 {
3480 debug_target.to_post_startup_inferior (ptid);
3481
3482 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
3483 PIDGET (ptid));
3484 }
3485
3486 static void
3487 debug_to_acknowledge_created_inferior (int pid)
3488 {
3489 debug_target.to_acknowledge_created_inferior (pid);
3490
3491 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
3492 pid);
3493 }
3494
3495 static void
3496 debug_to_insert_fork_catchpoint (int pid)
3497 {
3498 debug_target.to_insert_fork_catchpoint (pid);
3499
3500 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
3501 pid);
3502 }
3503
3504 static int
3505 debug_to_remove_fork_catchpoint (int pid)
3506 {
3507 int retval;
3508
3509 retval = debug_target.to_remove_fork_catchpoint (pid);
3510
3511 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3512 pid, retval);
3513
3514 return retval;
3515 }
3516
3517 static void
3518 debug_to_insert_vfork_catchpoint (int pid)
3519 {
3520 debug_target.to_insert_vfork_catchpoint (pid);
3521
3522 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
3523 pid);
3524 }
3525
3526 static int
3527 debug_to_remove_vfork_catchpoint (int pid)
3528 {
3529 int retval;
3530
3531 retval = debug_target.to_remove_vfork_catchpoint (pid);
3532
3533 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3534 pid, retval);
3535
3536 return retval;
3537 }
3538
3539 static void
3540 debug_to_insert_exec_catchpoint (int pid)
3541 {
3542 debug_target.to_insert_exec_catchpoint (pid);
3543
3544 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
3545 pid);
3546 }
3547
3548 static int
3549 debug_to_remove_exec_catchpoint (int pid)
3550 {
3551 int retval;
3552
3553 retval = debug_target.to_remove_exec_catchpoint (pid);
3554
3555 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3556 pid, retval);
3557
3558 return retval;
3559 }
3560
3561 static int
3562 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3563 {
3564 int has_exited;
3565
3566 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3567
3568 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3569 pid, wait_status, *exit_status, has_exited);
3570
3571 return has_exited;
3572 }
3573
3574 static int
3575 debug_to_can_run (void)
3576 {
3577 int retval;
3578
3579 retval = debug_target.to_can_run ();
3580
3581 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3582
3583 return retval;
3584 }
3585
3586 static void
3587 debug_to_notice_signals (ptid_t ptid)
3588 {
3589 debug_target.to_notice_signals (ptid);
3590
3591 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3592 PIDGET (ptid));
3593 }
3594
3595 static struct gdbarch *
3596 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3597 {
3598 struct gdbarch *retval;
3599
3600 retval = debug_target.to_thread_architecture (ops, ptid);
3601
3602 fprintf_unfiltered (gdb_stdlog, "target_thread_architecture (%s) = %s [%s]\n",
3603 target_pid_to_str (ptid), host_address_to_string (retval),
3604 gdbarch_bfd_arch_info (retval)->printable_name);
3605 return retval;
3606 }
3607
3608 static void
3609 debug_to_stop (ptid_t ptid)
3610 {
3611 debug_target.to_stop (ptid);
3612
3613 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3614 target_pid_to_str (ptid));
3615 }
3616
3617 static void
3618 debug_to_rcmd (char *command,
3619 struct ui_file *outbuf)
3620 {
3621 debug_target.to_rcmd (command, outbuf);
3622 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3623 }
3624
3625 static char *
3626 debug_to_pid_to_exec_file (int pid)
3627 {
3628 char *exec_file;
3629
3630 exec_file = debug_target.to_pid_to_exec_file (pid);
3631
3632 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3633 pid, exec_file);
3634
3635 return exec_file;
3636 }
3637
3638 static void
3639 setup_target_debug (void)
3640 {
3641 memcpy (&debug_target, &current_target, sizeof debug_target);
3642
3643 current_target.to_open = debug_to_open;
3644 current_target.to_post_attach = debug_to_post_attach;
3645 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3646 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3647 current_target.to_files_info = debug_to_files_info;
3648 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3649 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3650 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3651 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3652 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3653 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3654 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3655 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3656 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3657 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3658 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3659 current_target.to_terminal_init = debug_to_terminal_init;
3660 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3661 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3662 current_target.to_terminal_ours = debug_to_terminal_ours;
3663 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3664 current_target.to_terminal_info = debug_to_terminal_info;
3665 current_target.to_load = debug_to_load;
3666 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3667 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3668 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3669 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3670 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3671 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3672 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3673 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3674 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3675 current_target.to_has_exited = debug_to_has_exited;
3676 current_target.to_can_run = debug_to_can_run;
3677 current_target.to_notice_signals = debug_to_notice_signals;
3678 current_target.to_stop = debug_to_stop;
3679 current_target.to_rcmd = debug_to_rcmd;
3680 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3681 current_target.to_thread_architecture = debug_to_thread_architecture;
3682 }
3683 \f
3684
3685 static char targ_desc[] =
3686 "Names of targets and files being debugged.\n\
3687 Shows the entire stack of targets currently in use (including the exec-file,\n\
3688 core-file, and process, if any), as well as the symbol file name.";
3689
3690 static void
3691 do_monitor_command (char *cmd,
3692 int from_tty)
3693 {
3694 if ((current_target.to_rcmd
3695 == (void (*) (char *, struct ui_file *)) tcomplain)
3696 || (current_target.to_rcmd == debug_to_rcmd
3697 && (debug_target.to_rcmd
3698 == (void (*) (char *, struct ui_file *)) tcomplain)))
3699 error (_("\"monitor\" command not supported by this target."));
3700 target_rcmd (cmd, gdb_stdtarg);
3701 }
3702
3703 /* Print the name of each layer of our target stack. */
3704
3705 static void
3706 maintenance_print_target_stack (char *cmd, int from_tty)
3707 {
3708 struct target_ops *t;
3709
3710 printf_filtered (_("The current target stack is:\n"));
3711
3712 for (t = target_stack; t != NULL; t = t->beneath)
3713 {
3714 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3715 }
3716 }
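
/* With a live native inferior, "maint print target-stack" might
   print something like (illustrative only):

     The current target stack is:
       - child (Unix child process)
       - exec (Local exec file)
       - None (None)  */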
3717
3718 /* Controls whether async mode is permitted. */
3719 int target_async_permitted = 0;
3720
3721 /* The set command writes to this variable. If the inferior is
3722 executing, target_async_permitted is *not* updated. */
3723 static int target_async_permitted_1 = 0;
3724
3725 static void
3726 set_maintenance_target_async_permitted (char *args, int from_tty,
3727 struct cmd_list_element *c)
3728 {
3729 if (have_live_inferiors ())
3730 {
3731 target_async_permitted_1 = target_async_permitted;
3732 error (_("Cannot change this setting while the inferior is running."));
3733 }
3734
3735 target_async_permitted = target_async_permitted_1;
3736 }
3737
3738 static void
3739 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3740 struct cmd_list_element *c,
3741 const char *value)
3742 {
3743 fprintf_filtered (file, _("\
3744 Controlling the inferior in asynchronous mode is %s.\n"), value);
3745 }
3746
3747 /* Temporary copies of permission settings. */
3748
3749 static int may_write_registers_1 = 1;
3750 static int may_write_memory_1 = 1;
3751 static int may_insert_breakpoints_1 = 1;
3752 static int may_insert_tracepoints_1 = 1;
3753 static int may_insert_fast_tracepoints_1 = 1;
3754 static int may_stop_1 = 1;
3755
3756 /* Make the user-set values match the real values again. */
3757
3758 void
3759 update_target_permissions (void)
3760 {
3761 may_write_registers_1 = may_write_registers;
3762 may_write_memory_1 = may_write_memory;
3763 may_insert_breakpoints_1 = may_insert_breakpoints;
3764 may_insert_tracepoints_1 = may_insert_tracepoints;
3765 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3766 may_stop_1 = may_stop;
3767 }
3768
3769 /* A single function handles (most of) the permission flags in the same
3770 way. */
3771
3772 static void
3773 set_target_permissions (char *args, int from_tty,
3774 struct cmd_list_element *c)
3775 {
3776 if (target_has_execution)
3777 {
3778 update_target_permissions ();
3779 error (_("Cannot change this setting while the inferior is running."));
3780 }
3781
3782 /* Make the real values match the user-changed values. */
3783 may_write_registers = may_write_registers_1;
3784 may_insert_breakpoints = may_insert_breakpoints_1;
3785 may_insert_tracepoints = may_insert_tracepoints_1;
3786 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3787 may_stop = may_stop_1;
3788 update_observer_mode ();
3789 }
3790
3791 /* Set memory write permission independently of observer mode. */
3792
3793 static void
3794 set_write_memory_permission (char *args, int from_tty,
3795 struct cmd_list_element *c)
3796 {
3797 /* Make the real values match the user-changed values. */
3798 may_write_memory = may_write_memory_1;
3799 update_observer_mode ();
3800 }
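
/* Illustrative use from the command line, not part of this file:

     (gdb) set may-write-memory off

   After that, any attempt to modify target memory is rejected with an
   error, independently of the observer-mode setting.  */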
3801
3802
3803 void
3804 initialize_targets (void)
3805 {
3806 init_dummy_target ();
3807 push_target (&dummy_target);
3808
3809 add_info ("target", target_info, targ_desc);
3810 add_info ("files", target_info, targ_desc);
3811
3812 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3813 Set target debugging."), _("\
3814 Show target debugging."), _("\
3815 When non-zero, target debugging is enabled. Higher numbers are more\n\
3816 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3817 command."),
3818 NULL,
3819 show_targetdebug,
3820 &setdebuglist, &showdebuglist);
3821
3822 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3823 &trust_readonly, _("\
3824 Set mode for reading from readonly sections."), _("\
3825 Show mode for reading from readonly sections."), _("\
3826 When this mode is on, memory reads from readonly sections (such as .text)\n\
3827 will be read from the object file instead of from the target. This will\n\
3828 result in significant performance improvement for remote targets."),
3829 NULL,
3830 show_trust_readonly,
3831 &setlist, &showlist);
3832
3833 add_com ("monitor", class_obscure, do_monitor_command,
3834 _("Send a command to the remote monitor (remote targets only)."));
3835
3836 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3837 _("Print the name of each layer of the internal target stack."),
3838 &maintenanceprintlist);
3839
3840 add_setshow_boolean_cmd ("target-async", no_class,
3841 &target_async_permitted_1, _("\
3842 Set whether gdb controls the inferior in asynchronous mode."), _("\
3843 Show whether gdb controls the inferior in asynchronous mode."), _("\
3844 Tells gdb whether to control the inferior in asynchronous mode."),
3845 set_maintenance_target_async_permitted,
3846 show_maintenance_target_async_permitted,
3847 &setlist,
3848 &showlist);
3849
3850 add_setshow_boolean_cmd ("stack-cache", class_support,
3851 &stack_cache_enabled_p_1, _("\
3852 Set cache use for stack access."), _("\
3853 Show cache use for stack access."), _("\
3854 When on, use the data cache for all stack access, regardless of any\n\
3855 configured memory regions. This improves remote performance significantly.\n\
3856 By default, caching for stack access is on."),
3857 set_stack_cache_enabled_p,
3858 show_stack_cache_enabled_p,
3859 &setlist, &showlist);
3860
3861 add_setshow_boolean_cmd ("may-write-registers", class_support,
3862 &may_write_registers_1, _("\
3863 Set permission to write into registers."), _("\
3864 Show permission to write into registers."), _("\
3865 When this permission is on, GDB may write into the target's registers.\n\
3866 Otherwise, any sort of write attempt will result in an error."),
3867 set_target_permissions, NULL,
3868 &setlist, &showlist);
3869
3870 add_setshow_boolean_cmd ("may-write-memory", class_support,
3871 &may_write_memory_1, _("\
3872 Set permission to write into target memory."), _("\
3873 Show permission to write into target memory."), _("\
3874 When this permission is on, GDB may write into the target's memory.\n\
3875 Otherwise, any sort of write attempt will result in an error."),
3876 set_write_memory_permission, NULL,
3877 &setlist, &showlist);
3878
3879 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
3880 &may_insert_breakpoints_1, _("\
3881 Set permission to insert breakpoints in the target."), _("\
3882 Show permission to insert breakpoints in the target."), _("\
3883 When this permission is on, GDB may insert breakpoints in the program.\n\
3884 Otherwise, any sort of insertion attempt will result in an error."),
3885 set_target_permissions, NULL,
3886 &setlist, &showlist);
3887
3888 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
3889 &may_insert_tracepoints_1, _("\
3890 Set permission to insert tracepoints in the target."), _("\
3891 Show permission to insert tracepoints in the target."), _("\
3892 When this permission is on, GDB may insert tracepoints in the program.\n\
3893 Otherwise, any sort of insertion attempt will result in an error."),
3894 set_target_permissions, NULL,
3895 &setlist, &showlist);
3896
3897 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
3898 &may_insert_fast_tracepoints_1, _("\
3899 Set permission to insert fast tracepoints in the target."), _("\
3900 Show permission to insert fast tracepoints in the target."), _("\
3901 When this permission is on, GDB may insert fast tracepoints.\n\
3902 Otherwise, any sort of insertion attempt will result in an error."),
3903 set_target_permissions, NULL,
3904 &setlist, &showlist);
3905
3906 add_setshow_boolean_cmd ("may-interrupt", class_support,
3907 &may_stop_1, _("\
3908 Set permission to interrupt or signal the target."), _("\
3909 Show permission to interrupt or signal the target."), _("\
3910 When this permission is on, GDB may interrupt/stop the target's execution.\n\
3911 Otherwise, any attempt to interrupt or stop will be ignored."),
3912 set_target_permissions, NULL,
3913 &setlist, &showlist);
3914
3915
3916 target_dcache = dcache_init ();
3917 }