1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2018 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "byte-vector.h"
50 #include "terminal.h"
51 #include <algorithm>
52 #include <unordered_map>
53
54 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
55
56 static void default_terminal_info (struct target_ops *, const char *, int);
57
58 static int default_watchpoint_addr_within_range (struct target_ops *,
59 CORE_ADDR, CORE_ADDR, int);
60
61 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
62 CORE_ADDR, int);
63
64 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
65
66 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
67 long lwp, long tid);
68
69 static int default_follow_fork (struct target_ops *self, int follow_child,
70 int detach_fork);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static struct address_space *default_thread_address_space
86 (struct target_ops *self, ptid_t ptid);
87
88 static void tcomplain (void) ATTRIBUTE_NORETURN;
89
90 static struct target_ops *find_default_run_target (const char *);
91
92 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
93 ptid_t ptid);
94
95 static int dummy_find_memory_regions (struct target_ops *self,
96 find_memory_region_ftype ignore1,
97 void *ignore2);
98
99 static char *dummy_make_corefile_notes (struct target_ops *self,
100 bfd *ignore1, int *ignore2);
101
102 static const char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
103
104 static enum exec_direction_kind default_execution_direction
105 (struct target_ops *self);
106
107 /* Mapping between target_info objects (which have address identity)
108 and corresponding open/factory function/callback. Each add_target
109 call adds one entry to this map, and registers a "target
110 TARGET_NAME" command that when invoked calls the factory registered
111 here. The target_info object is associated with the command via
112 the command's context. */
113 static std::unordered_map<const target_info *, target_open_ftype *>
114 target_factories;
115
116 /* The initial current target, so that there is always a semi-valid
117 current target. */
118
119 static struct target_ops *the_dummy_target;
120 static struct target_ops *the_debug_target;
121
122 /* Top of target stack. */
123 /* The target structure we are currently using to talk to a process
124 or file or whatever "inferior" we have. */
125
126 static target_ops *g_current_top_target;
127
128 target_ops *
129 current_top_target ()
130 {
131 return g_current_top_target;
132 }
133
134 /* Command list for target. */
135
136 static struct cmd_list_element *targetlist = NULL;
137
138 /* Nonzero if we should trust readonly sections from the
139 executable when reading memory. */
140
141 static int trust_readonly = 0;
142
143 /* Nonzero if we should show true memory content including
144 memory breakpoints inserted by GDB. */
145
146 static int show_memory_breakpoints = 0;
147
148 /* These globals control whether GDB attempts to perform these
149 operations; they are useful for targets that need to prevent
150 inadvertent disruption, such as in non-stop mode. */
151
152 int may_write_registers = 1;
153
154 int may_write_memory = 1;
155
156 int may_insert_breakpoints = 1;
157
158 int may_insert_tracepoints = 1;
159
160 int may_insert_fast_tracepoints = 1;
161
162 int may_stop = 1;
163
164 /* Non-zero if we want to see trace of target level stuff. */
165
166 static unsigned int targetdebug = 0;
167
168 static void
169 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
170 {
171 if (targetdebug)
172 push_target (the_debug_target);
173 else
174 unpush_target (the_debug_target);
175 }
176
177 static void
178 show_targetdebug (struct ui_file *file, int from_tty,
179 struct cmd_list_element *c, const char *value)
180 {
181 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
182 }
183
184 /* The user just typed 'target' without the name of a target. */
185
186 static void
187 target_command (const char *arg, int from_tty)
188 {
189 fputs_filtered ("Argument required (target name). Try `help target'\n",
190 gdb_stdout);
191 }
192
193 #if GDB_SELF_TEST
194 namespace selftests {
195
196 /* A mock process_stratum target_ops that doesn't read/write registers
197 anywhere. */
198
199 static const target_info test_target_info = {
200 "test",
201 N_("unit tests target"),
202 N_("You should never see this"),
203 };
204
205 const target_info &
206 test_target_ops::info () const
207 {
208 return test_target_info;
209 }
210
211 } /* namespace selftests */
212 #endif /* GDB_SELF_TEST */
213
214 /* Default target_has_* methods for process_stratum targets. */
215
216 int
217 default_child_has_all_memory ()
218 {
219 /* If no inferior selected, then we can't read memory here. */
220 if (ptid_equal (inferior_ptid, null_ptid))
221 return 0;
222
223 return 1;
224 }
225
226 int
227 default_child_has_memory ()
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_stack ()
238 {
239 /* If no inferior selected, there's no stack. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_registers ()
248 {
249 /* Can't read registers from no inferior. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_execution (ptid_t the_ptid)
258 {
259 /* If there's no thread selected, then we can't make it run through
260 hoops. */
261 if (ptid_equal (the_ptid, null_ptid))
262 return 0;
263
264 return 1;
265 }
266
267
268 int
269 target_has_all_memory_1 (void)
270 {
271 struct target_ops *t;
272
273 for (t = current_top_target (); t != NULL; t = t->beneath)
274 if (t->has_all_memory ())
275 return 1;
276
277 return 0;
278 }
279
280 int
281 target_has_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_top_target (); t != NULL; t = t->beneath)
286 if (t->has_memory ())
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_stack_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_top_target (); t != NULL; t = t->beneath)
298 if (t->has_stack ())
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_registers_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_top_target (); t != NULL; t = t->beneath)
310 if (t->has_registers ())
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_execution_1 (ptid_t the_ptid)
318 {
319 struct target_ops *t;
320
321 for (t = current_top_target (); t != NULL; t = t->beneath)
322 if (t->has_execution (the_ptid))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_current (void)
330 {
331 return target_has_execution_1 (inferior_ptid);
332 }
333
334 /* This is used to implement the various target commands. */
335
336 static void
337 open_target (const char *args, int from_tty, struct cmd_list_element *command)
338 {
339 auto *ti = static_cast<target_info *> (get_cmd_context (command));
340 target_open_ftype *func = target_factories[ti];
341
342 if (targetdebug)
343 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
344 ti->shortname);
345
346 func (args, from_tty);
347
348 if (targetdebug)
349 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
350 ti->shortname, args, from_tty);
351 }
352
353 /* See target.h. */
354
355 void
356 add_target (const target_info &t, target_open_ftype *func,
357 completer_ftype *completer)
358 {
359 struct cmd_list_element *c;
360
361 auto &func_slot = target_factories[&t];
362 if (func_slot != nullptr)
363 internal_error (__FILE__, __LINE__,
364 _("target already added (\"%s\")."), t.shortname);
365 func_slot = func;
366
367 if (targetlist == NULL)
368 add_prefix_cmd ("target", class_run, target_command, _("\
369 Connect to a target machine or process.\n\
370 The first argument is the type or protocol of the target machine.\n\
371 Remaining arguments are interpreted by the target protocol. For more\n\
372 information on the arguments for a particular protocol, type\n\
373 `help target ' followed by the protocol name."),
374 &targetlist, "target ", 0, &cmdlist);
375 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
376 set_cmd_context (c, (void *) &t);
377 set_cmd_sfunc (c, open_target);
378 if (completer != NULL)
379 set_cmd_completer (c, completer);
380 }
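/* A typical registration looks like the following (a minimal sketch;
   "foo", foo_target_info and foo_target_open are hypothetical names,
   not defined in this file):

     static const target_info foo_target_info = {
       "foo",
       N_("Foo protocol target"),
       N_("Debug a program over the hypothetical foo protocol.")
     };

     static void
     foo_target_open (const char *args, int from_tty)
     {
       ... parse ARGS, connect, then push the foo target ...
     }

     void
     _initialize_foo_target ()
     {
       add_target (foo_target_info, foo_target_open);
     }

   This registers a "target foo" command; invoking it ends up in
   open_target above, which looks up and calls foo_target_open.  */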
381
382 /* See target.h. */
383
384 void
385 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
386 {
387 struct cmd_list_element *c;
388 char *alt;
389
390 /* If we use add_alias_cmd here, we do not get the deprecated warning;
391 see PR cli/15104. */
392 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
393 set_cmd_sfunc (c, open_target);
394 set_cmd_context (c, (void *) &tinfo);
395 alt = xstrprintf ("target %s", tinfo.shortname);
396 deprecate_cmd (c, alt);
397 }
398
399 /* Stub functions */
400
401 void
402 target_kill (void)
403 {
404 current_top_target ()->kill ();
405 }
406
407 void
408 target_load (const char *arg, int from_tty)
409 {
410 target_dcache_invalidate ();
411 current_top_target ()->load (arg, from_tty);
412 }
413
414 /* Define it. */
415
416 target_terminal_state target_terminal::m_terminal_state
417 = target_terminal_state::is_ours;
418
419 /* See target/target.h. */
420
421 void
422 target_terminal::init (void)
423 {
424 current_top_target ()->terminal_init ();
425
426 m_terminal_state = target_terminal_state::is_ours;
427 }
428
429 /* See target/target.h. */
430
431 void
432 target_terminal::inferior (void)
433 {
434 struct ui *ui = current_ui;
435
436 /* A background resume (``run&'') should leave GDB in control of the
437 terminal. */
438 if (ui->prompt_state != PROMPT_BLOCKED)
439 return;
440
441 /* Since we always run the inferior in the main console (unless "set
442 inferior-tty" is in effect), when some UI other than the main one
443 calls target_terminal::inferior, then we leave the main UI's
444 terminal settings as is. */
445 if (ui != main_ui)
446 return;
447
448 /* If GDB is resuming the inferior in the foreground, install
449 inferior's terminal modes. */
450
451 struct inferior *inf = current_inferior ();
452
453 if (inf->terminal_state != target_terminal_state::is_inferior)
454 {
455 current_top_target ()->terminal_inferior ();
456 inf->terminal_state = target_terminal_state::is_inferior;
457 }
458
459 m_terminal_state = target_terminal_state::is_inferior;
460
461 /* If the user hit C-c before, pretend that it was hit right
462 here. */
463 if (check_quit_flag ())
464 target_pass_ctrlc ();
465 }
466
467 /* See target/target.h. */
468
469 void
470 target_terminal::restore_inferior (void)
471 {
472 struct ui *ui = current_ui;
473
474 /* See target_terminal::inferior(). */
475 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
476 return;
477
478 /* Restore the terminal settings of inferiors that were in the
479 foreground but are now ours_for_output due to a temporary
480 target_terminal::ours_for_output() call. */
481
482 {
483 scoped_restore_current_inferior restore_inferior;
484 struct inferior *inf;
485
486 ALL_INFERIORS (inf)
487 {
488 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
489 {
490 set_current_inferior (inf);
491 current_top_target ()->terminal_inferior ();
492 inf->terminal_state = target_terminal_state::is_inferior;
493 }
494 }
495 }
496
497 m_terminal_state = target_terminal_state::is_inferior;
498
499 /* If the user hit C-c before, pretend that it was hit right
500 here. */
501 if (check_quit_flag ())
502 target_pass_ctrlc ();
503 }
504
505 /* Switch terminal state to DESIRED_STATE, either is_ours, or
506 is_ours_for_output. */
507
508 static void
509 target_terminal_is_ours_kind (target_terminal_state desired_state)
510 {
511 scoped_restore_current_inferior restore_inferior;
512 struct inferior *inf;
513
514 /* Must do this in two passes. First, have all inferiors save the
515 current terminal settings. Then, after all inferiors have had a
516 chance to safely save the terminal settings, restore GDB's
517 terminal settings. */
518
519 ALL_INFERIORS (inf)
520 {
521 if (inf->terminal_state == target_terminal_state::is_inferior)
522 {
523 set_current_inferior (inf);
524 current_top_target ()->terminal_save_inferior ();
525 }
526 }
527
528 ALL_INFERIORS (inf)
529 {
530 /* Note we don't check is_inferior here like above because we
531 need to handle 'is_ours_for_output -> is_ours' too. Careful
532 to never transition from 'is_ours' to 'is_ours_for_output',
533 though. */
534 if (inf->terminal_state != target_terminal_state::is_ours
535 && inf->terminal_state != desired_state)
536 {
537 set_current_inferior (inf);
538 if (desired_state == target_terminal_state::is_ours)
539 current_top_target ()->terminal_ours ();
540 else if (desired_state == target_terminal_state::is_ours_for_output)
541 current_top_target ()->terminal_ours_for_output ();
542 else
543 gdb_assert_not_reached ("unhandled desired state");
544 inf->terminal_state = desired_state;
545 }
546 }
547 }
548
549 /* See target/target.h. */
550
551 void
552 target_terminal::ours ()
553 {
554 struct ui *ui = current_ui;
555
556 /* See target_terminal::inferior. */
557 if (ui != main_ui)
558 return;
559
560 if (m_terminal_state == target_terminal_state::is_ours)
561 return;
562
563 target_terminal_is_ours_kind (target_terminal_state::is_ours);
564 m_terminal_state = target_terminal_state::is_ours;
565 }
566
567 /* See target/target.h. */
568
569 void
570 target_terminal::ours_for_output ()
571 {
572 struct ui *ui = current_ui;
573
574 /* See target_terminal::inferior. */
575 if (ui != main_ui)
576 return;
577
578 if (!target_terminal::is_inferior ())
579 return;
580
581 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
582 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
583 }
584
585 /* See target/target.h. */
586
587 void
588 target_terminal::info (const char *arg, int from_tty)
589 {
590 current_top_target ()->terminal_info (arg, from_tty);
591 }
592
593 /* See target.h. */
594
595 int
596 target_supports_terminal_ours (void)
597 {
598 return current_top_target ()->supports_terminal_ours ();
599 }
600
601 static void
602 tcomplain (void)
603 {
604 error (_("You can't do that when your target is `%s'"),
605 current_top_target ()->shortname ());
606 }
607
608 void
609 noprocess (void)
610 {
611 error (_("You can't do that without a process to debug."));
612 }
613
614 static void
615 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
616 {
617 printf_unfiltered (_("No saved terminal information.\n"));
618 }
619
620 /* A default implementation for the to_get_ada_task_ptid target method.
621
622 This function builds the PTID by using both LWP and TID as part of
623 the PTID lwp and tid elements. The pid used is the pid of the
624 inferior_ptid. */
625
626 static ptid_t
627 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
628 {
629 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
630 }
631
632 static enum exec_direction_kind
633 default_execution_direction (struct target_ops *self)
634 {
635 if (!target_can_execute_reverse)
636 return EXEC_FORWARD;
637 else if (!target_can_async_p ())
638 return EXEC_FORWARD;
639 else
640 gdb_assert_not_reached ("\
641 to_execution_direction must be implemented for reverse async");
642 }
643
644 /* Push a new target type into the stack of the existing target accessors,
645 possibly superseding some of the existing accessors.
646
647 Rather than allow an empty stack, we always have the dummy target at
648 the bottom stratum, so we can call the function vectors without
649 checking them. */
650
651 void
652 push_target (struct target_ops *t)
653 {
654 struct target_ops **cur;
655
656 /* Find the proper stratum to install this target in. */
657 for (cur = &g_current_top_target; (*cur) != NULL; cur = &(*cur)->beneath)
658 {
659 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
660 break;
661 }
662
663 /* If there are already targets at this stratum, remove them. */
664 /* FIXME: cagney/2003-10-15: I think this should be popping all
665 targets to CUR, and not just those at this stratum level. */
666 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
667 {
668 /* There's already something at this stratum level. Close it,
669 and un-hook it from the stack. */
670 struct target_ops *tmp = (*cur);
671
672 (*cur) = (*cur)->beneath;
673 tmp->beneath = NULL;
674 target_close (tmp);
675 }
676
677 /* We have removed all targets in our stratum, now add the new one. */
678 t->beneath = (*cur);
679 (*cur) = t;
680 }
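/* For illustration only (not something this code enforces): after "run"
   on a native GNU/Linux configuration the stack typically contains

     process_stratum -- the native (ptrace-based) target
     file_stratum    -- the exec target, serving the executable's sections
     dummy_stratum   -- the_dummy_target, always at the bottom

   with current_top_target () returning the process_stratum entry and each
   entry's 'beneath' pointer leading to the next target down.  */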
681
682 /* Remove a target_ops vector from the stack, wherever it may be.
683 Return how many times it was removed (0 or 1). */
684
685 int
686 unpush_target (struct target_ops *t)
687 {
688 struct target_ops **cur;
689 struct target_ops *tmp;
690
691 if (t->to_stratum == dummy_stratum)
692 internal_error (__FILE__, __LINE__,
693 _("Attempt to unpush the dummy target"));
694
695 /* Look for the specified target. Note that we assume that a target
696 can only occur once in the target stack. */
697
698 for (cur = &g_current_top_target; (*cur) != NULL; cur = &(*cur)->beneath)
699 {
700 if ((*cur) == t)
701 break;
702 }
703
704 /* If we don't find target_ops, quit. Only open targets should be
705 closed. */
706 if ((*cur) == NULL)
707 return 0;
708
709 /* Unchain the target. */
710 tmp = (*cur);
711 (*cur) = (*cur)->beneath;
712 tmp->beneath = NULL;
713
714 /* Finally close the target. Note we do this after unchaining, so
715 any target method calls from within the target_close
716 implementation don't end up in T anymore. */
717 target_close (t);
718
719 return 1;
720 }
721
722 /* Unpush TARGET and assert that it worked. */
723
724 static void
725 unpush_target_and_assert (struct target_ops *target)
726 {
727 if (!unpush_target (target))
728 {
729 fprintf_unfiltered (gdb_stderr,
730 "pop_all_targets couldn't find target %s\n",
731 target->shortname ());
732 internal_error (__FILE__, __LINE__,
733 _("failed internal consistency check"));
734 }
735 }
736
737 void
738 pop_all_targets_above (enum strata above_stratum)
739 {
740 while ((int) (current_top_target ()->to_stratum) > (int) above_stratum)
741 unpush_target_and_assert (current_top_target ());
742 }
743
744 /* See target.h. */
745
746 void
747 pop_all_targets_at_and_above (enum strata stratum)
748 {
749 while ((int) (current_top_target ()->to_stratum) >= (int) stratum)
750 unpush_target_and_assert (current_top_target ());
751 }
752
753 void
754 pop_all_targets (void)
755 {
756 pop_all_targets_above (dummy_stratum);
757 }
758
759 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
760
761 int
762 target_is_pushed (struct target_ops *t)
763 {
764 struct target_ops *cur;
765
766 for (cur = current_top_target (); cur != NULL; cur = cur->beneath)
767 if (cur == t)
768 return 1;
769
770 return 0;
771 }
772
773 /* Default implementation of to_get_thread_local_address. */
774
775 static void
776 generic_tls_error (void)
777 {
778 throw_error (TLS_GENERIC_ERROR,
779 _("Cannot find thread-local variables on this target"));
780 }
781
782 /* Using the objfile specified in OBJFILE, find the address for the
783 current thread's thread-local storage with offset OFFSET. */
784 CORE_ADDR
785 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
786 {
787 volatile CORE_ADDR addr = 0;
788 struct target_ops *target = current_top_target ();
789
790 if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
791 {
792 ptid_t ptid = inferior_ptid;
793
794 TRY
795 {
796 CORE_ADDR lm_addr;
797
798 /* Fetch the load module address for this objfile. */
799 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
800 objfile);
801
802 addr = target->get_thread_local_address (ptid, lm_addr, offset);
803 }
804 /* If an error occurred, print TLS related messages here. Otherwise,
805 throw the error to some higher catcher. */
806 CATCH (ex, RETURN_MASK_ALL)
807 {
808 int objfile_is_library = (objfile->flags & OBJF_SHARED);
809
810 switch (ex.error)
811 {
812 case TLS_NO_LIBRARY_SUPPORT_ERROR:
813 error (_("Cannot find thread-local variables "
814 "in this thread library."));
815 break;
816 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
817 if (objfile_is_library)
818 error (_("Cannot find shared library `%s' in dynamic"
819 " linker's load module list"), objfile_name (objfile));
820 else
821 error (_("Cannot find executable file `%s' in dynamic"
822 " linker's load module list"), objfile_name (objfile));
823 break;
824 case TLS_NOT_ALLOCATED_YET_ERROR:
825 if (objfile_is_library)
826 error (_("The inferior has not yet allocated storage for"
827 " thread-local variables in\n"
828 "the shared library `%s'\n"
829 "for %s"),
830 objfile_name (objfile), target_pid_to_str (ptid));
831 else
832 error (_("The inferior has not yet allocated storage for"
833 " thread-local variables in\n"
834 "the executable `%s'\n"
835 "for %s"),
836 objfile_name (objfile), target_pid_to_str (ptid));
837 break;
838 case TLS_GENERIC_ERROR:
839 if (objfile_is_library)
840 error (_("Cannot find thread-local storage for %s, "
841 "shared library %s:\n%s"),
842 target_pid_to_str (ptid),
843 objfile_name (objfile), ex.message);
844 else
845 error (_("Cannot find thread-local storage for %s, "
846 "executable file %s:\n%s"),
847 target_pid_to_str (ptid),
848 objfile_name (objfile), ex.message);
849 break;
850 default:
851 throw_exception (ex);
852 break;
853 }
854 }
855 END_CATCH
856 }
857 /* It wouldn't be wrong here to try a gdbarch method, too; finding
858 TLS is an ABI-specific thing. But we don't do that yet. */
859 else
860 error (_("Cannot find thread-local variables on this target"));
861
862 return addr;
863 }
864
865 const char *
866 target_xfer_status_to_string (enum target_xfer_status status)
867 {
868 #define CASE(X) case X: return #X
869 switch (status)
870 {
871 CASE(TARGET_XFER_E_IO);
872 CASE(TARGET_XFER_UNAVAILABLE);
873 default:
874 return "<unknown>";
875 }
876 #undef CASE
877 };
878
879
880 #undef MIN
881 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
882
883 /* target_read_string -- read a null-terminated string, up to LEN bytes,
884 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
885 Set *STRING to a pointer to xmalloc'd memory containing the data; the
886 buffer is released automatically when *STRING is destroyed. Return the
887 number of bytes successfully read. */
888
889 int
890 target_read_string (CORE_ADDR memaddr, gdb::unique_xmalloc_ptr<char> *string,
891 int len, int *errnop)
892 {
893 int tlen, offset, i;
894 gdb_byte buf[4];
895 int errcode = 0;
896 char *buffer;
897 int buffer_allocated;
898 char *bufptr;
899 unsigned int nbytes_read = 0;
900
901 gdb_assert (string);
902
903 /* Small for testing. */
904 buffer_allocated = 4;
905 buffer = (char *) xmalloc (buffer_allocated);
906 bufptr = buffer;
907
908 while (len > 0)
909 {
910 tlen = MIN (len, 4 - (memaddr & 3));
911 offset = memaddr & 3;
912
913 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
914 if (errcode != 0)
915 {
916 /* The transfer request might have crossed the boundary to an
917 unallocated region of memory. Retry the transfer, requesting
918 a single byte. */
919 tlen = 1;
920 offset = 0;
921 errcode = target_read_memory (memaddr, buf, 1);
922 if (errcode != 0)
923 goto done;
924 }
925
926 if (bufptr - buffer + tlen > buffer_allocated)
927 {
928 unsigned int bytes;
929
930 bytes = bufptr - buffer;
931 buffer_allocated *= 2;
932 buffer = (char *) xrealloc (buffer, buffer_allocated);
933 bufptr = buffer + bytes;
934 }
935
936 for (i = 0; i < tlen; i++)
937 {
938 *bufptr++ = buf[i + offset];
939 if (buf[i + offset] == '\000')
940 {
941 nbytes_read += i + 1;
942 goto done;
943 }
944 }
945
946 memaddr += tlen;
947 len -= tlen;
948 nbytes_read += tlen;
949 }
950 done:
951 string->reset (buffer);
952 if (errnop != NULL)
953 *errnop = errcode;
954 return nbytes_read;
955 }
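/* A minimal usage sketch (ADDR is assumed to hold the inferior-side
   address of a NUL-terminated string; it is not defined in this file):

     gdb::unique_xmalloc_ptr<char> str;
     int err;
     int nread = target_read_string (addr, &str, 200, &err);
     if (nread > 0 && err == 0)
       printf_filtered ("%s\n", str.get ());

   On failure *ERRNOP receives the errno-style code; whatever bytes were
   read before the failure are still returned through STRING.  */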
956
957 struct target_section_table *
958 target_get_section_table (struct target_ops *target)
959 {
960 return target->get_section_table ();
961 }
962
963 /* Find a section containing ADDR. */
964
965 struct target_section *
966 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
967 {
968 struct target_section_table *table = target_get_section_table (target);
969 struct target_section *secp;
970
971 if (table == NULL)
972 return NULL;
973
974 for (secp = table->sections; secp < table->sections_end; secp++)
975 {
976 if (addr >= secp->addr && addr < secp->endaddr)
977 return secp;
978 }
979 return NULL;
980 }
981
982
983 /* Helper for the memory xfer routines. Checks the attributes of the
984 memory region of MEMADDR against the read or write being attempted.
985 If the access is permitted returns true, otherwise returns false.
986 REGION_P is an optional output parameter. If not-NULL, it is
987 filled with a pointer to the memory region of MEMADDR. REG_LEN
988 returns LEN trimmed to the end of the region. This is how much the
989 caller can continue requesting, if the access is permitted. A
990 single xfer request must not straddle memory region boundaries. */
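/* For example (illustrative numbers only): if the region containing
   MEMADDR spans [0x1000, 0x2000) and the caller requests 0x100 bytes at
   0x1fc0, the access is permitted but *REG_LEN is trimmed to 0x40, so
   the transfer stops at the region boundary.  */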
991
992 static int
993 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
994 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
995 struct mem_region **region_p)
996 {
997 struct mem_region *region;
998
999 region = lookup_mem_region (memaddr);
1000
1001 if (region_p != NULL)
1002 *region_p = region;
1003
1004 switch (region->attrib.mode)
1005 {
1006 case MEM_RO:
1007 if (writebuf != NULL)
1008 return 0;
1009 break;
1010
1011 case MEM_WO:
1012 if (readbuf != NULL)
1013 return 0;
1014 break;
1015
1016 case MEM_FLASH:
1017 /* We only support writing to flash during "load" for now. */
1018 if (writebuf != NULL)
1019 error (_("Writing to flash memory forbidden in this context"));
1020 break;
1021
1022 case MEM_NONE:
1023 return 0;
1024 }
1025
1026 /* region->hi == 0 means there's no upper bound. */
1027 if (memaddr + len < region->hi || region->hi == 0)
1028 *reg_len = len;
1029 else
1030 *reg_len = region->hi - memaddr;
1031
1032 return 1;
1033 }
1034
1035 /* Read memory from more than one valid target. A core file, for
1036 instance, could have some of memory but delegate other bits to
1037 the target below it. So, we must manually try all targets. */
1038
1039 enum target_xfer_status
1040 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1041 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1042 ULONGEST *xfered_len)
1043 {
1044 enum target_xfer_status res;
1045
1046 do
1047 {
1048 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1049 readbuf, writebuf, memaddr, len,
1050 xfered_len);
1051 if (res == TARGET_XFER_OK)
1052 break;
1053
1054 /* Stop if the target reports that the memory is not available. */
1055 if (res == TARGET_XFER_UNAVAILABLE)
1056 break;
1057
1058 /* We want to continue past core files to executables, but not
1059 past a running target's memory. */
1060 if (ops->has_all_memory ())
1061 break;
1062
1063 ops = ops->beneath;
1064 }
1065 while (ops != NULL);
1066
1067 /* The cache works at the raw memory level. Make sure the cache
1068 gets updated with raw contents no matter what kind of memory
1069 object was originally being written. Note we do write-through
1070 first, so that if it fails, we don't write to the cache contents
1071 that never made it to the target. */
1072 if (writebuf != NULL
1073 && !ptid_equal (inferior_ptid, null_ptid)
1074 && target_dcache_init_p ()
1075 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1076 {
1077 DCACHE *dcache = target_dcache_get ();
1078
1079 /* Note that writing to an area of memory which wasn't present
1080 in the cache doesn't cause it to be loaded in. */
1081 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1082 }
1083
1084 return res;
1085 }
1086
1087 /* Perform a partial memory transfer.
1088 For docs see target.h, to_xfer_partial. */
1089
1090 static enum target_xfer_status
1091 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1092 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1093 ULONGEST len, ULONGEST *xfered_len)
1094 {
1095 enum target_xfer_status res;
1096 ULONGEST reg_len;
1097 struct mem_region *region;
1098 struct inferior *inf;
1099
1100 /* For accesses to unmapped overlay sections, read directly from
1101 files. Must do this first, as MEMADDR may need adjustment. */
1102 if (readbuf != NULL && overlay_debugging)
1103 {
1104 struct obj_section *section = find_pc_overlay (memaddr);
1105
1106 if (pc_in_unmapped_range (memaddr, section))
1107 {
1108 struct target_section_table *table
1109 = target_get_section_table (ops);
1110 const char *section_name = section->the_bfd_section->name;
1111
1112 memaddr = overlay_mapped_address (memaddr, section);
1113 return section_table_xfer_memory_partial (readbuf, writebuf,
1114 memaddr, len, xfered_len,
1115 table->sections,
1116 table->sections_end,
1117 section_name);
1118 }
1119 }
1120
1121 /* Try the executable files, if "trust-readonly-sections" is set. */
1122 if (readbuf != NULL && trust_readonly)
1123 {
1124 struct target_section *secp;
1125 struct target_section_table *table;
1126
1127 secp = target_section_by_addr (ops, memaddr);
1128 if (secp != NULL
1129 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1130 secp->the_bfd_section)
1131 & SEC_READONLY))
1132 {
1133 table = target_get_section_table (ops);
1134 return section_table_xfer_memory_partial (readbuf, writebuf,
1135 memaddr, len, xfered_len,
1136 table->sections,
1137 table->sections_end,
1138 NULL);
1139 }
1140 }
1141
1142 /* Try GDB's internal data cache. */
1143
1144 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1145 &region))
1146 return TARGET_XFER_E_IO;
1147
1148 if (!ptid_equal (inferior_ptid, null_ptid))
1149 inf = find_inferior_ptid (inferior_ptid);
1150 else
1151 inf = NULL;
1152
1153 if (inf != NULL
1154 && readbuf != NULL
1155 /* The dcache reads whole cache lines; that doesn't play well
1156 with reading from a trace buffer, because reading outside of
1157 the collected memory range fails. */
1158 && get_traceframe_number () == -1
1159 && (region->attrib.cache
1160 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1161 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1162 {
1163 DCACHE *dcache = target_dcache_get_or_init ();
1164
1165 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1166 reg_len, xfered_len);
1167 }
1168
1169 /* If none of those methods found the memory we wanted, fall back
1170 to a target partial transfer. Normally a single call to
1171 to_xfer_partial is enough; if it doesn't recognize an object
1172 it will call the to_xfer_partial of the next target down.
1173 But for memory this won't do. Memory is the only target
1174 object which can be read from more than one valid target.
1175 A core file, for instance, could have some of memory but
1176 delegate other bits to the target below it. So, we must
1177 manually try all targets. */
1178
1179 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1180 xfered_len);
1181
1182 /* If we still haven't got anything, return the last error. We
1183 give up. */
1184 return res;
1185 }
1186
1187 /* Perform a partial memory transfer. For docs see target.h,
1188 to_xfer_partial. */
1189
1190 static enum target_xfer_status
1191 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1192 gdb_byte *readbuf, const gdb_byte *writebuf,
1193 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1194 {
1195 enum target_xfer_status res;
1196
1197 /* Zero length requests are ok and require no work. */
1198 if (len == 0)
1199 return TARGET_XFER_EOF;
1200
1201 memaddr = address_significant (target_gdbarch (), memaddr);
1202
1203 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1204 breakpoint insns, thus hiding out from higher layers whether
1205 there are software breakpoints inserted in the code stream. */
1206 if (readbuf != NULL)
1207 {
1208 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1209 xfered_len);
1210
1211 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1212 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1213 }
1214 else
1215 {
1216 /* A large write request is likely to be partially satisfied
1217 by memory_xfer_partial_1. We will continually malloc
1218 and free a copy of the entire write request for breakpoint
1219 shadow handling even though we only end up writing a small
1220 subset of it. Cap writes to a limit specified by the target
1221 to mitigate this. */
1222 len = std::min (ops->get_memory_xfer_limit (), len);
1223
1224 gdb::byte_vector buf (writebuf, writebuf + len);
1225 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1226 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1227 xfered_len);
1228 }
1229
1230 return res;
1231 }
1232
1233 scoped_restore_tmpl<int>
1234 make_scoped_restore_show_memory_breakpoints (int show)
1235 {
1236 return make_scoped_restore (&show_memory_breakpoints, show);
1237 }
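/* Usage sketch: while the returned scoped_restore is live, reads report
   the raw memory contents, including any breakpoint instructions GDB has
   inserted, rather than the shadowed original bytes:

     scoped_restore_tmpl<int> restore
       = make_scoped_restore_show_memory_breakpoints (1);
     ... target_read_memory () now returns unshadowed contents ...  */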
1238
1239 /* For docs see target.h, to_xfer_partial. */
1240
1241 enum target_xfer_status
1242 target_xfer_partial (struct target_ops *ops,
1243 enum target_object object, const char *annex,
1244 gdb_byte *readbuf, const gdb_byte *writebuf,
1245 ULONGEST offset, ULONGEST len,
1246 ULONGEST *xfered_len)
1247 {
1248 enum target_xfer_status retval;
1249
1250 /* Transfer is done when LEN is zero. */
1251 if (len == 0)
1252 return TARGET_XFER_EOF;
1253
1254 if (writebuf && !may_write_memory)
1255 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1256 core_addr_to_string_nz (offset), plongest (len));
1257
1258 *xfered_len = 0;
1259
1260 /* If this is a memory transfer, let the memory-specific code
1261 have a look at it instead. Memory transfers are more
1262 complicated. */
1263 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1264 || object == TARGET_OBJECT_CODE_MEMORY)
1265 retval = memory_xfer_partial (ops, object, readbuf,
1266 writebuf, offset, len, xfered_len);
1267 else if (object == TARGET_OBJECT_RAW_MEMORY)
1268 {
1269 /* Skip/avoid accessing the target if the memory region
1270 attributes block the access. Check this here instead of in
1271 raw_memory_xfer_partial as otherwise we'd end up checking
1272 this twice in the case the memory_xfer_partial path is
1273 taken; once before checking the dcache, and another in the
1274 tail call to raw_memory_xfer_partial. */
1275 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1276 NULL))
1277 return TARGET_XFER_E_IO;
1278
1279 /* Request the normal memory object from other layers. */
1280 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1281 xfered_len);
1282 }
1283 else
1284 retval = ops->xfer_partial (object, annex, readbuf,
1285 writebuf, offset, len, xfered_len);
1286
1287 if (targetdebug)
1288 {
1289 const unsigned char *myaddr = NULL;
1290
1291 fprintf_unfiltered (gdb_stdlog,
1292 "%s:target_xfer_partial "
1293 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1294 ops->shortname (),
1295 (int) object,
1296 (annex ? annex : "(null)"),
1297 host_address_to_string (readbuf),
1298 host_address_to_string (writebuf),
1299 core_addr_to_string_nz (offset),
1300 pulongest (len), retval,
1301 pulongest (*xfered_len));
1302
1303 if (readbuf)
1304 myaddr = readbuf;
1305 if (writebuf)
1306 myaddr = writebuf;
1307 if (retval == TARGET_XFER_OK && myaddr != NULL)
1308 {
1309 int i;
1310
1311 fputs_unfiltered (", bytes =", gdb_stdlog);
1312 for (i = 0; i < *xfered_len; i++)
1313 {
1314 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1315 {
1316 if (targetdebug < 2 && i > 0)
1317 {
1318 fprintf_unfiltered (gdb_stdlog, " ...");
1319 break;
1320 }
1321 fprintf_unfiltered (gdb_stdlog, "\n");
1322 }
1323
1324 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1325 }
1326 }
1327
1328 fputc_unfiltered ('\n', gdb_stdlog);
1329 }
1330
1331 /* Check implementations of to_xfer_partial update *XFERED_LEN
1332 properly. Do assertion after printing debug messages, so that we
1333 can find more clues on assertion failure from debugging messages. */
1334 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1335 gdb_assert (*xfered_len > 0);
1336
1337 return retval;
1338 }
1339
1340 /* Read LEN bytes of target memory at address MEMADDR, placing the
1341 results in GDB's memory at MYADDR. Returns either 0 for success or
1342 -1 if any error occurs.
1343
1344 If an error occurs, no guarantee is made about the contents of the data at
1345 MYADDR. In particular, the caller should not depend upon partial reads
1346 filling the buffer with good data. There is no way for the caller to know
1347 how much good data might have been transfered anyway. Callers that can
1348 deal with partial reads should call target_read (which will retry until
1349 it makes no progress, and then return how much was transferred). */
1350
1351 int
1352 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1353 {
1354 if (target_read (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1355 myaddr, memaddr, len) == len)
1356 return 0;
1357 else
1358 return -1;
1359 }
1360
1361 /* See target/target.h. */
1362
1363 int
1364 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1365 {
1366 gdb_byte buf[4];
1367 int r;
1368
1369 r = target_read_memory (memaddr, buf, sizeof buf);
1370 if (r != 0)
1371 return r;
1372 *result = extract_unsigned_integer (buf, sizeof buf,
1373 gdbarch_byte_order (target_gdbarch ()));
1374 return 0;
1375 }
1376
1377 /* Like target_read_memory, but specify explicitly that this is a read
1378 from the target's raw memory. That is, this read bypasses the
1379 dcache, breakpoint shadowing, etc. */
1380
1381 int
1382 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1383 {
1384 if (target_read (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1385 myaddr, memaddr, len) == len)
1386 return 0;
1387 else
1388 return -1;
1389 }
1390
1391 /* Like target_read_memory, but specify explicitly that this is a read from
1392 the target's stack. This may trigger different cache behavior. */
1393
1394 int
1395 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1396 {
1397 if (target_read (current_top_target (), TARGET_OBJECT_STACK_MEMORY, NULL,
1398 myaddr, memaddr, len) == len)
1399 return 0;
1400 else
1401 return -1;
1402 }
1403
1404 /* Like target_read_memory, but specify explicitly that this is a read from
1405 the target's code. This may trigger different cache behavior. */
1406
1407 int
1408 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1409 {
1410 if (target_read (current_top_target (), TARGET_OBJECT_CODE_MEMORY, NULL,
1411 myaddr, memaddr, len) == len)
1412 return 0;
1413 else
1414 return -1;
1415 }
1416
1417 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1418 Returns either 0 for success or -1 if any error occurs. If an
1419 error occurs, no guarantee is made about how much data got written.
1420 Callers that can deal with partial writes should call
1421 target_write. */
1422
1423 int
1424 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1425 {
1426 if (target_write (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1427 myaddr, memaddr, len) == len)
1428 return 0;
1429 else
1430 return -1;
1431 }
1432
1433 /* Write LEN bytes from MYADDR to target raw memory at address
1434 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1435 If an error occurs, no guarantee is made about how much data got
1436 written. Callers that can deal with partial writes should call
1437 target_write. */
1438
1439 int
1440 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1441 {
1442 if (target_write (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1443 myaddr, memaddr, len) == len)
1444 return 0;
1445 else
1446 return -1;
1447 }
1448
1449 /* Fetch the target's memory map. */
1450
1451 std::vector<mem_region>
1452 target_memory_map (void)
1453 {
1454 std::vector<mem_region> result = current_top_target ()->memory_map ();
1455 if (result.empty ())
1456 return result;
1457
1458 std::sort (result.begin (), result.end ());
1459
1460 /* Check that regions do not overlap. Simultaneously assign
1461 a numbering for the "mem" commands to use to refer to
1462 each region. */
1463 mem_region *last_one = NULL;
1464 for (size_t ix = 0; ix < result.size (); ix++)
1465 {
1466 mem_region *this_one = &result[ix];
1467 this_one->number = ix;
1468
1469 if (last_one != NULL && last_one->hi > this_one->lo)
1470 {
1471 warning (_("Overlapping regions in memory map: ignoring"));
1472 return std::vector<mem_region> ();
1473 }
1474
1475 last_one = this_one;
1476 }
1477
1478 return result;
1479 }
1480
1481 void
1482 target_flash_erase (ULONGEST address, LONGEST length)
1483 {
1484 current_top_target ()->flash_erase (address, length);
1485 }
1486
1487 void
1488 target_flash_done (void)
1489 {
1490 current_top_target ()->flash_done ();
1491 }
1492
1493 static void
1494 show_trust_readonly (struct ui_file *file, int from_tty,
1495 struct cmd_list_element *c, const char *value)
1496 {
1497 fprintf_filtered (file,
1498 _("Mode for reading from readonly sections is %s.\n"),
1499 value);
1500 }
1501
1502 /* Target vector read/write partial wrapper functions. */
1503
1504 static enum target_xfer_status
1505 target_read_partial (struct target_ops *ops,
1506 enum target_object object,
1507 const char *annex, gdb_byte *buf,
1508 ULONGEST offset, ULONGEST len,
1509 ULONGEST *xfered_len)
1510 {
1511 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1512 xfered_len);
1513 }
1514
1515 static enum target_xfer_status
1516 target_write_partial (struct target_ops *ops,
1517 enum target_object object,
1518 const char *annex, const gdb_byte *buf,
1519 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1520 {
1521 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1522 xfered_len);
1523 }
1524
1525 /* Wrappers to perform the full transfer. */
1526
1527 /* For docs on target_read see target.h. */
1528
1529 LONGEST
1530 target_read (struct target_ops *ops,
1531 enum target_object object,
1532 const char *annex, gdb_byte *buf,
1533 ULONGEST offset, LONGEST len)
1534 {
1535 LONGEST xfered_total = 0;
1536 int unit_size = 1;
1537
1538 /* If we are reading from a memory object, find the length of an addressable
1539 unit for that architecture. */
1540 if (object == TARGET_OBJECT_MEMORY
1541 || object == TARGET_OBJECT_STACK_MEMORY
1542 || object == TARGET_OBJECT_CODE_MEMORY
1543 || object == TARGET_OBJECT_RAW_MEMORY)
1544 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1545
1546 while (xfered_total < len)
1547 {
1548 ULONGEST xfered_partial;
1549 enum target_xfer_status status;
1550
1551 status = target_read_partial (ops, object, annex,
1552 buf + xfered_total * unit_size,
1553 offset + xfered_total, len - xfered_total,
1554 &xfered_partial);
1555
1556 /* Call an observer, notifying them of the xfer progress? */
1557 if (status == TARGET_XFER_EOF)
1558 return xfered_total;
1559 else if (status == TARGET_XFER_OK)
1560 {
1561 xfered_total += xfered_partial;
1562 QUIT;
1563 }
1564 else
1565 return TARGET_XFER_E_IO;
1566
1567 }
1568 return len;
1569 }
1570
1571 /* Assuming that the entire [begin, end) range of memory cannot be
1572 read, try to read whatever subrange is possible to read.
1573
1574 The function returns, in RESULT, either zero or one memory block.
1575 If there's a readable subrange at the beginning, it is completely
1576 read and returned. Any further readable subrange will not be read.
1577 Otherwise, if there's a readable subrange at the end, it will be
1578 completely read and returned. Any readable subranges before it
1579 (obviously, not starting at the beginning), will be ignored. In
1580 other cases -- either no readable subrange, or readable subrange(s)
1581 that are neither at the beginning nor at the end -- nothing is returned.
1582
1583 The purpose of this function is to handle a read across a boundary
1584 of accessible memory in a case when memory map is not available.
1585 The above restrictions are fine for this case, but will give
1586 incorrect results if the memory is 'patchy'. However, supporting
1587 'patchy' memory would require trying to read every single byte,
1588 and that seems an unacceptable solution. An explicit memory map is
1589 recommended for this case -- and target_read_memory_robust will
1590 take care of reading multiple ranges then. */
1591
1592 static void
1593 read_whatever_is_readable (struct target_ops *ops,
1594 const ULONGEST begin, const ULONGEST end,
1595 int unit_size,
1596 std::vector<memory_read_result> *result)
1597 {
1598 ULONGEST current_begin = begin;
1599 ULONGEST current_end = end;
1600 int forward;
1601 ULONGEST xfered_len;
1602
1603 /* If we previously failed to read 1 byte, nothing can be done here. */
1604 if (end - begin <= 1)
1605 return;
1606
1607 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
1608
1609 /* Check that either the first or the last byte is readable, and give up
1610 if not. This heuristic is meant to permit reading accessible memory
1611 at the boundary of accessible region. */
1612 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1613 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
1614 {
1615 forward = 1;
1616 ++current_begin;
1617 }
1618 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1619 buf.get () + (end - begin) - 1, end - 1, 1,
1620 &xfered_len) == TARGET_XFER_OK)
1621 {
1622 forward = 0;
1623 --current_end;
1624 }
1625 else
1626 return;
1627
1628 /* Loop invariant is that the [current_begin, current_end) range was previously
1629 found to be not readable as a whole.
1630
1631 Note loop condition -- if the range has 1 byte, we can't divide the range
1632 so there's no point trying further. */
1633 while (current_end - current_begin > 1)
1634 {
1635 ULONGEST first_half_begin, first_half_end;
1636 ULONGEST second_half_begin, second_half_end;
1637 LONGEST xfer;
1638 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
1639
1640 if (forward)
1641 {
1642 first_half_begin = current_begin;
1643 first_half_end = middle;
1644 second_half_begin = middle;
1645 second_half_end = current_end;
1646 }
1647 else
1648 {
1649 first_half_begin = middle;
1650 first_half_end = current_end;
1651 second_half_begin = current_begin;
1652 second_half_end = middle;
1653 }
1654
1655 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1656 buf.get () + (first_half_begin - begin) * unit_size,
1657 first_half_begin,
1658 first_half_end - first_half_begin);
1659
1660 if (xfer == first_half_end - first_half_begin)
1661 {
1662 /* This half reads up fine. So, the error must be in the
1663 other half. */
1664 current_begin = second_half_begin;
1665 current_end = second_half_end;
1666 }
1667 else
1668 {
1669 /* This half is not readable. Because we've tried one byte, we
1670 know some part of this half is actually readable. Go to the next
1671 iteration to divide again and try to read.
1672
1673 We don't handle the other half, because this function only tries
1674 to read a single readable subrange. */
1675 current_begin = first_half_begin;
1676 current_end = first_half_end;
1677 }
1678 }
1679
1680 if (forward)
1681 {
1682 /* The [begin, current_begin) range has been read. */
1683 result->emplace_back (begin, current_end, std::move (buf));
1684 }
1685 else
1686 {
1687 /* The [current_end, end) range has been read. */
1688 LONGEST region_len = end - current_end;
1689
1690 gdb::unique_xmalloc_ptr<gdb_byte> data
1691 ((gdb_byte *) xmalloc (region_len * unit_size));
1692 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
1693 region_len * unit_size);
1694 result->emplace_back (current_end, end, std::move (data));
1695 }
1696 }
1697
1698 std::vector<memory_read_result>
1699 read_memory_robust (struct target_ops *ops,
1700 const ULONGEST offset, const LONGEST len)
1701 {
1702 std::vector<memory_read_result> result;
1703 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1704
1705 LONGEST xfered_total = 0;
1706 while (xfered_total < len)
1707 {
1708 struct mem_region *region = lookup_mem_region (offset + xfered_total);
1709 LONGEST region_len;
1710
1711 /* If there is no explicit region, a fake one should be created. */
1712 gdb_assert (region);
1713
1714 if (region->hi == 0)
1715 region_len = len - xfered_total;
1716 else
1717 region_len = region->hi - offset;
1718
1719 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1720 {
1721 /* Cannot read this region. Note that we can end up here only
1722 if the region is explicitly marked inaccessible, or
1723 'inaccessible-by-default' is in effect. */
1724 xfered_total += region_len;
1725 }
1726 else
1727 {
1728 LONGEST to_read = std::min (len - xfered_total, region_len);
1729 gdb::unique_xmalloc_ptr<gdb_byte> buffer
1730 ((gdb_byte *) xmalloc (to_read * unit_size));
1731
1732 LONGEST xfered_partial =
1733 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
1734 offset + xfered_total, to_read);
1735 /* Call an observer, notifying them of the xfer progress? */
1736 if (xfered_partial <= 0)
1737 {
1738 /* Got an error reading full chunk. See if maybe we can read
1739 some subrange. */
1740 read_whatever_is_readable (ops, offset + xfered_total,
1741 offset + xfered_total + to_read,
1742 unit_size, &result);
1743 xfered_total += to_read;
1744 }
1745 else
1746 {
1747 result.emplace_back (offset + xfered_total,
1748 offset + xfered_total + xfered_partial,
1749 std::move (buffer));
1750 xfered_total += xfered_partial;
1751 }
1752 QUIT;
1753 }
1754 }
1755
1756 return result;
1757 }
1758
1759
1760 /* An alternative to target_write with progress callbacks. */
1761
1762 LONGEST
1763 target_write_with_progress (struct target_ops *ops,
1764 enum target_object object,
1765 const char *annex, const gdb_byte *buf,
1766 ULONGEST offset, LONGEST len,
1767 void (*progress) (ULONGEST, void *), void *baton)
1768 {
1769 LONGEST xfered_total = 0;
1770 int unit_size = 1;
1771
1772 /* If we are writing to a memory object, find the length of an addressable
1773 unit for that architecture. */
1774 if (object == TARGET_OBJECT_MEMORY
1775 || object == TARGET_OBJECT_STACK_MEMORY
1776 || object == TARGET_OBJECT_CODE_MEMORY
1777 || object == TARGET_OBJECT_RAW_MEMORY)
1778 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1779
1780 /* Give the progress callback a chance to set up. */
1781 if (progress)
1782 (*progress) (0, baton);
1783
1784 while (xfered_total < len)
1785 {
1786 ULONGEST xfered_partial;
1787 enum target_xfer_status status;
1788
1789 status = target_write_partial (ops, object, annex,
1790 buf + xfered_total * unit_size,
1791 offset + xfered_total, len - xfered_total,
1792 &xfered_partial);
1793
1794 if (status != TARGET_XFER_OK)
1795 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
1796
1797 if (progress)
1798 (*progress) (xfered_partial, baton);
1799
1800 xfered_total += xfered_partial;
1801 QUIT;
1802 }
1803 return len;
1804 }
1805
1806 /* For docs on target_write see target.h. */
1807
1808 LONGEST
1809 target_write (struct target_ops *ops,
1810 enum target_object object,
1811 const char *annex, const gdb_byte *buf,
1812 ULONGEST offset, LONGEST len)
1813 {
1814 return target_write_with_progress (ops, object, annex, buf, offset, len,
1815 NULL, NULL);
1816 }
1817
1818 /* Helper for target_read_alloc and target_read_stralloc. See their comments
1819 for details. */
1820
1821 template <typename T>
1822 gdb::optional<gdb::def_vector<T>>
1823 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1824 const char *annex)
1825 {
1826 gdb::def_vector<T> buf;
1827 size_t buf_pos = 0;
1828 const int chunk = 4096;
1829
1830 /* This function does not have a length parameter; it reads the
1831 entire OBJECT. Also, it doesn't support objects fetched partly
1832 from one target and partly from another (in a different stratum,
1833 e.g. a core file and an executable). Both reasons make it
1834 unsuitable for reading memory. */
1835 gdb_assert (object != TARGET_OBJECT_MEMORY);
1836
1837 /* Start by reading up to 4K at a time. The target will throttle
1838 this number down if necessary. */
1839 while (1)
1840 {
1841 ULONGEST xfered_len;
1842 enum target_xfer_status status;
1843
1844 buf.resize (buf_pos + chunk);
1845
1846 status = target_read_partial (ops, object, annex,
1847 (gdb_byte *) &buf[buf_pos],
1848 buf_pos, chunk,
1849 &xfered_len);
1850
1851 if (status == TARGET_XFER_EOF)
1852 {
1853 /* Read all there was. */
1854 buf.resize (buf_pos);
1855 return buf;
1856 }
1857 else if (status != TARGET_XFER_OK)
1858 {
1859 /* An error occurred. */
1860 return {};
1861 }
1862
1863 buf_pos += xfered_len;
1864
1865 QUIT;
1866 }
1867 }
1868
1869 /* See target.h. */
1870
1871 gdb::optional<gdb::byte_vector>
1872 target_read_alloc (struct target_ops *ops, enum target_object object,
1873 const char *annex)
1874 {
1875 return target_read_alloc_1<gdb_byte> (ops, object, annex);
1876 }
1877
1878 /* See target.h. */
1879
1880 gdb::optional<gdb::char_vector>
1881 target_read_stralloc (struct target_ops *ops, enum target_object object,
1882 const char *annex)
1883 {
1884 gdb::optional<gdb::char_vector> buf
1885 = target_read_alloc_1<char> (ops, object, annex);
1886
1887 if (!buf)
1888 return {};
1889
1890 if (buf->back () != '\0')
1891 buf->push_back ('\0');
1892
1893 /* Check for embedded NUL bytes; but allow trailing NULs. */
1894 for (auto it = std::find (buf->begin (), buf->end (), '\0');
1895 it != buf->end (); it++)
1896 if (*it != '\0')
1897 {
1898 warning (_("target object %d, annex %s, "
1899 "contained unexpected null characters"),
1900 (int) object, annex ? annex : "(none)");
1901 break;
1902 }
1903
1904 return buf;
1905 }
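/* Usage sketch, with TARGET_OBJECT_LIBRARIES as an example object (any
   non-memory object works the same way; process_xml is a hypothetical
   consumer):

     gdb::optional<gdb::char_vector> xml
       = target_read_stralloc (current_top_target (),
                               TARGET_OBJECT_LIBRARIES, NULL);
     if (xml)
       process_xml (xml->data ());

   When a value is returned it is guaranteed to be NUL-terminated; on a
   transfer error no value is returned at all.  */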
1906
1907 /* Memory transfer methods. */
1908
1909 void
1910 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1911 LONGEST len)
1912 {
1913 /* This method is used to read from an alternate, non-current
1914 target. This read must bypass the overlay support (as symbols
1915 don't match this target), and GDB's internal cache (wrong cache
1916 for this target). */
1917 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1918 != len)
1919 memory_error (TARGET_XFER_E_IO, addr);
1920 }
1921
1922 ULONGEST
1923 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1924 int len, enum bfd_endian byte_order)
1925 {
1926 gdb_byte buf[sizeof (ULONGEST)];
1927
1928 gdb_assert (len <= sizeof (buf));
1929 get_target_memory (ops, addr, buf, len);
1930 return extract_unsigned_integer (buf, len, byte_order);
1931 }
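
/* Illustrative usage sketch (hypothetical caller; OPS and ADDR are assumed
   to be set up elsewhere): read a 4-byte unsigned value from an alternate
   target using the current architecture's byte order.  Errors are reported
   through memory_error, so no explicit status check is needed here.

     enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
     ULONGEST value = get_target_memory_unsigned (ops, addr, 4, byte_order);
*/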
1932
1933 /* See target.h. */
1934
1935 int
1936 target_insert_breakpoint (struct gdbarch *gdbarch,
1937 struct bp_target_info *bp_tgt)
1938 {
1939 if (!may_insert_breakpoints)
1940 {
1941 warning (_("May not insert breakpoints"));
1942 return 1;
1943 }
1944
1945 return current_top_target ()->insert_breakpoint (gdbarch, bp_tgt);
1946 }
1947
1948 /* See target.h. */
1949
1950 int
1951 target_remove_breakpoint (struct gdbarch *gdbarch,
1952 struct bp_target_info *bp_tgt,
1953 enum remove_bp_reason reason)
1954 {
1955 /* This is kind of a weird case to handle, but the permission might
1956 have been changed after breakpoints were inserted - in which case
1957 we should just take the user literally and assume that any
1958 breakpoints should be left in place. */
1959 if (!may_insert_breakpoints)
1960 {
1961 warning (_("May not remove breakpoints"));
1962 return 1;
1963 }
1964
1965 return current_top_target ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1966 }
1967
1968 static void
1969 info_target_command (const char *args, int from_tty)
1970 {
1971 struct target_ops *t;
1972 int has_all_mem = 0;
1973
1974 if (symfile_objfile != NULL)
1975 printf_unfiltered (_("Symbols from \"%s\".\n"),
1976 objfile_name (symfile_objfile));
1977
1978 for (t = current_top_target (); t != NULL; t = t->beneath)
1979 {
1980 if (!t->has_memory ())
1981 continue;
1982
1983 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1984 continue;
1985 if (has_all_mem)
1986 printf_unfiltered (_("\tWhile running this, "
1987 "GDB does not access memory from...\n"));
1988 printf_unfiltered ("%s:\n", t->longname ());
1989 t->files_info ();
1990 has_all_mem = t->has_all_memory ();
1991 }
1992 }
1993
1994 /* This function is called before any new inferior is created, e.g.
1995 by running a program, attaching, or connecting to a target.
1996 It cleans up any state from previous invocations which might
1997 change between runs. This is a subset of what target_preopen
1998 resets (things which might change between targets). */
1999
2000 void
2001 target_pre_inferior (int from_tty)
2002 {
2003 /* Clear out solib state. Otherwise the solib state of the previous
2004 inferior might have survived and is entirely wrong for the new
2005 target. This has been observed on GNU/Linux using glibc 2.3. How
2006 to reproduce:
2007
2008 bash$ ./foo&
2009 [1] 4711
2010 bash$ ./foo&
2011 [1] 4712
2012 bash$ gdb ./foo
2013 [...]
2014 (gdb) attach 4711
2015 (gdb) detach
2016 (gdb) attach 4712
2017 Cannot access memory at address 0xdeadbeef
2018 */
2019
2020 /* In some OSs, the shared library list is the same/global/shared
2021 across inferiors. If code is shared between processes, so are
2022 memory regions and features. */
2023 if (!gdbarch_has_global_solist (target_gdbarch ()))
2024 {
2025 no_shared_libraries (NULL, from_tty);
2026
2027 invalidate_target_mem_regions ();
2028
2029 target_clear_description ();
2030 }
2031
2032 /* attach_flag may be set if the previous process associated with
2033 the inferior was attached to. */
2034 current_inferior ()->attach_flag = 0;
2035
2036 current_inferior ()->highest_thread_num = 0;
2037
2038 agent_capability_invalidate ();
2039 }
2040
2041 /* Callback for iterate_over_inferiors. Gets rid of the given
2042 inferior. */
2043
2044 static int
2045 dispose_inferior (struct inferior *inf, void *args)
2046 {
2047 struct thread_info *thread;
2048
2049 thread = any_thread_of_process (inf->pid);
2050 if (thread)
2051 {
2052 switch_to_thread (thread->ptid);
2053
2054 /* Core inferiors actually should be detached, not killed. */
2055 if (target_has_execution)
2056 target_kill ();
2057 else
2058 target_detach (inf, 0);
2059 }
2060
2061 return 0;
2062 }
2063
2064 /* This is to be called by the open routine before it does
2065 anything. */
2066
2067 void
2068 target_preopen (int from_tty)
2069 {
2070 dont_repeat ();
2071
2072 if (have_inferiors ())
2073 {
2074 if (!from_tty
2075 || !have_live_inferiors ()
2076 || query (_("A program is being debugged already. Kill it? ")))
2077 iterate_over_inferiors (dispose_inferior, NULL);
2078 else
2079 error (_("Program not killed."));
2080 }
2081
2082 /* Calling target_kill may remove the target from the stack. But if
2083 it doesn't (which seems like a win for UDI), remove it now. */
2084 /* Leave the exec target, though. The user may be switching from a
2085 live process to a core of the same program. */
2086 pop_all_targets_above (file_stratum);
2087
2088 target_pre_inferior (from_tty);
2089 }
2090
2091 /* See target.h. */
2092
2093 void
2094 target_detach (inferior *inf, int from_tty)
2095 {
2096 /* As long as some to_detach implementations rely on the current_inferior
2097 (either directly, or indirectly, like through target_gdbarch or by
2098 reading memory), INF needs to be the current inferior.  Once that
2099 requirement no longer holds, this assertion can be
2100 removed.  */
2101 gdb_assert (inf == current_inferior ());
2102
2103 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2104 /* Don't remove global breakpoints here. They're removed on
2105 disconnection from the target. */
2106 ;
2107 else
2108 /* If we're in breakpoints-always-inserted mode, have to remove
2109 them before detaching. */
2110 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2111
2112 prepare_for_detach ();
2113
2114 current_top_target ()->detach (inf, from_tty);
2115 }
2116
2117 void
2118 target_disconnect (const char *args, int from_tty)
2119 {
2120 /* If we're in breakpoints-always-inserted mode or if breakpoints
2121 are global across processes, we have to remove them before
2122 disconnecting. */
2123 remove_breakpoints ();
2124
2125 current_top_target ()->disconnect (args, from_tty);
2126 }
2127
2128 /* See target/target.h. */
2129
2130 ptid_t
2131 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2132 {
2133 return current_top_target ()->wait (ptid, status, options);
2134 }
2135
2136 /* See target.h. */
2137
2138 ptid_t
2139 default_target_wait (struct target_ops *ops,
2140 ptid_t ptid, struct target_waitstatus *status,
2141 int options)
2142 {
2143 status->kind = TARGET_WAITKIND_IGNORE;
2144 return minus_one_ptid;
2145 }
2146
2147 const char *
2148 target_pid_to_str (ptid_t ptid)
2149 {
2150 return current_top_target ()->pid_to_str (ptid);
2151 }
2152
2153 const char *
2154 target_thread_name (struct thread_info *info)
2155 {
2156 return current_top_target ()->thread_name (info);
2157 }
2158
2159 struct thread_info *
2160 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2161 int handle_len,
2162 struct inferior *inf)
2163 {
2164 return current_top_target ()->thread_handle_to_thread_info (thread_handle,
2165 handle_len, inf);
2166 }
2167
2168 void
2169 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2170 {
2171 target_dcache_invalidate ();
2172
2173 current_top_target ()->resume (ptid, step, signal);
2174
2175 registers_changed_ptid (ptid);
2176 /* We only set the internal executing state here. The user/frontend
2177 running state is set at a higher level. */
2178 set_executing (ptid, 1);
2179 clear_inline_frame_state (ptid);
2180 }
2181
2182 /* If true, target_commit_resume is a nop. */
2183 static int defer_target_commit_resume;
2184
2185 /* See target.h. */
2186
2187 void
2188 target_commit_resume (void)
2189 {
2190 if (defer_target_commit_resume)
2191 return;
2192
2193 current_top_target ()->commit_resume ();
2194 }
2195
2196 /* See target.h. */
2197
2198 scoped_restore_tmpl<int>
2199 make_scoped_defer_target_commit_resume ()
2200 {
2201 return make_scoped_restore (&defer_target_commit_resume, 1);
2202 }
2203
2204 void
2205 target_pass_signals (int numsigs, unsigned char *pass_signals)
2206 {
2207 current_top_target ()->pass_signals (numsigs, pass_signals);
2208 }
2209
2210 void
2211 target_program_signals (int numsigs, unsigned char *program_signals)
2212 {
2213 current_top_target ()->program_signals (numsigs, program_signals);
2214 }
2215
2216 static int
2217 default_follow_fork (struct target_ops *self, int follow_child,
2218 int detach_fork)
2219 {
2220 /* Some target returned a fork event, but did not know how to follow it. */
2221 internal_error (__FILE__, __LINE__,
2222 _("could not find a target to follow fork"));
2223 }
2224
2225 /* Look through the list of possible targets for a target that can
2226 follow forks. */
2227
2228 int
2229 target_follow_fork (int follow_child, int detach_fork)
2230 {
2231 return current_top_target ()->follow_fork (follow_child, detach_fork);
2232 }
2233
2234 /* Target wrapper for follow exec hook. */
2235
2236 void
2237 target_follow_exec (struct inferior *inf, char *execd_pathname)
2238 {
2239 current_top_target ()->follow_exec (inf, execd_pathname);
2240 }
2241
2242 static void
2243 default_mourn_inferior (struct target_ops *self)
2244 {
2245 internal_error (__FILE__, __LINE__,
2246 _("could not find a target to follow mourn inferior"));
2247 }
2248
2249 void
2250 target_mourn_inferior (ptid_t ptid)
2251 {
2252 gdb_assert (ptid_equal (ptid, inferior_ptid));
2253 current_top_target ()->mourn_inferior ();
2254
2255 /* We no longer need to keep handles on any of the object files.
2256 Make sure to release them to avoid unnecessarily locking any
2257 of them while we're not actually debugging. */
2258 bfd_cache_close_all ();
2259 }
2260
2261 /* Look for a target which can describe architectural features, starting
2262 from TARGET. If we find one, return its description. */
2263
2264 const struct target_desc *
2265 target_read_description (struct target_ops *target)
2266 {
2267 return target->read_description ();
2268 }
2269
2270 /* This implements a basic search of memory, reading target memory and
2271 performing the search here (as opposed to performing the search on the
2272 target side with, for example, gdbserver). */
2273
2274 int
2275 simple_search_memory (struct target_ops *ops,
2276 CORE_ADDR start_addr, ULONGEST search_space_len,
2277 const gdb_byte *pattern, ULONGEST pattern_len,
2278 CORE_ADDR *found_addrp)
2279 {
2280 /* NOTE: also defined in find.c testcase. */
2281 #define SEARCH_CHUNK_SIZE 16000
2282 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2283 /* Buffer to hold memory contents for searching. */
2284 unsigned search_buf_size;
2285
2286 search_buf_size = chunk_size + pattern_len - 1;
2287
2288 /* No point in trying to allocate a buffer larger than the search space. */
2289 if (search_space_len < search_buf_size)
2290 search_buf_size = search_space_len;
2291
2292 gdb::byte_vector search_buf (search_buf_size);
2293
2294 /* Prime the search buffer. */
2295
2296 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2297 search_buf.data (), start_addr, search_buf_size)
2298 != search_buf_size)
2299 {
2300 warning (_("Unable to access %s bytes of target "
2301 "memory at %s, halting search."),
2302 pulongest (search_buf_size), hex_string (start_addr));
2303 return -1;
2304 }
2305
2306 /* Perform the search.
2307
2308 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2309 When we've scanned N bytes we copy the trailing bytes to the start and
2310 read in another N bytes. */
2311
2312 while (search_space_len >= pattern_len)
2313 {
2314 gdb_byte *found_ptr;
2315 unsigned nr_search_bytes
2316 = std::min (search_space_len, (ULONGEST) search_buf_size);
2317
2318 found_ptr = (gdb_byte *) memmem (search_buf.data (), nr_search_bytes,
2319 pattern, pattern_len);
2320
2321 if (found_ptr != NULL)
2322 {
2323 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf.data ());
2324
2325 *found_addrp = found_addr;
2326 return 1;
2327 }
2328
2329 /* Not found in this chunk, skip to next chunk. */
2330
2331 /* Don't let search_space_len wrap here, it's unsigned. */
2332 if (search_space_len >= chunk_size)
2333 search_space_len -= chunk_size;
2334 else
2335 search_space_len = 0;
2336
2337 if (search_space_len >= pattern_len)
2338 {
2339 unsigned keep_len = search_buf_size - chunk_size;
2340 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2341 int nr_to_read;
2342
2343 /* Copy the trailing part of the previous iteration to the front
2344 of the buffer for the next iteration. */
2345 gdb_assert (keep_len == pattern_len - 1);
2346 memcpy (&search_buf[0], &search_buf[chunk_size], keep_len);
2347
2348 nr_to_read = std::min (search_space_len - keep_len,
2349 (ULONGEST) chunk_size);
2350
2351 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2352 &search_buf[keep_len], read_addr,
2353 nr_to_read) != nr_to_read)
2354 {
2355 warning (_("Unable to access %s bytes of target "
2356 "memory at %s, halting search."),
2357 plongest (nr_to_read),
2358 hex_string (read_addr));
2359 return -1;
2360 }
2361
2362 start_addr += chunk_size;
2363 }
2364 }
2365
2366 /* Not found. */
2367
2368 return 0;
2369 }
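
/* Worked example of the chunking above (the numbers are illustrative):
   with chunk_size == 16000 and pattern_len == 5, search_buf_size is
   16004.  After scanning the first 16000 bytes, the keep_len == 4
   (pattern_len - 1) trailing bytes are copied to the front of the buffer
   and another chunk is read in behind them, so a match that straddles the
   16000-byte boundary is still seen by the next memmem call.  */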
2370
2371 /* Default implementation of memory-searching. */
2372
2373 static int
2374 default_search_memory (struct target_ops *self,
2375 CORE_ADDR start_addr, ULONGEST search_space_len,
2376 const gdb_byte *pattern, ULONGEST pattern_len,
2377 CORE_ADDR *found_addrp)
2378 {
2379 /* Start over from the top of the target stack. */
2380 return simple_search_memory (current_top_target (),
2381 start_addr, search_space_len,
2382 pattern, pattern_len, found_addrp);
2383 }
2384
2385 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2386 sequence of bytes in PATTERN with length PATTERN_LEN.
2387
2388 The result is 1 if found, 0 if not found, and -1 if there was an error
2389 requiring halting of the search (e.g. memory read error).
2390 If the pattern is found the address is recorded in FOUND_ADDRP. */
2391
2392 int
2393 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2394 const gdb_byte *pattern, ULONGEST pattern_len,
2395 CORE_ADDR *found_addrp)
2396 {
2397 return current_top_target ()->search_memory (start_addr, search_space_len,
2398 pattern, pattern_len, found_addrp);
2399 }
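
/* Usage sketch (hypothetical caller; START, LEN, PATTERN and PATTERN_LEN
   are assumed to be set up elsewhere):

     CORE_ADDR found;
     int res = target_search_memory (start, len, pattern, pattern_len,
                                     &found);
     if (res == 1)
       ;  /* Pattern found at FOUND.  */
     else if (res == 0)
       ;  /* Not found in the search space.  */
     else
       ;  /* res == -1: a memory error halted the search.  */
*/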
2400
2401 /* Look through the currently pushed targets. If none of them will
2402 be able to restart the currently running process, issue an error
2403 message. */
2404
2405 void
2406 target_require_runnable (void)
2407 {
2408 struct target_ops *t;
2409
2410 for (t = current_top_target (); t != NULL; t = t->beneath)
2411 {
2412 /* If this target knows how to create a new program, then
2413 assume we will still be able to after killing the current
2414 one. Either killing and mourning will not pop T, or else
2415 find_default_run_target will find it again. */
2416 if (t->can_create_inferior ())
2417 return;
2418
2419 /* Do not worry about targets at certain strata that cannot
2420 create inferiors. Assume they will be pushed again if
2421 necessary, and continue to the process_stratum. */
2422 if (t->to_stratum > process_stratum)
2423 continue;
2424
2425 error (_("The \"%s\" target does not support \"run\". "
2426 "Try \"help target\" or \"continue\"."),
2427 t->shortname ());
2428 }
2429
2430 /* This function is only called if the target is running. In that
2431 case there should have been a process_stratum target and it
2432 should either know how to create inferiors, or not... */
2433 internal_error (__FILE__, __LINE__, _("No targets found"));
2434 }
2435
2436 /* Whether GDB is allowed to fall back to the default run target for
2437 "run", "attach", etc. when no target is connected yet. */
2438 static int auto_connect_native_target = 1;
2439
2440 static void
2441 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2442 struct cmd_list_element *c, const char *value)
2443 {
2444 fprintf_filtered (file,
2445 _("Whether GDB may automatically connect to the "
2446 "native target is %s.\n"),
2447 value);
2448 }
2449
2450 /* A pointer to the target that can respond to "run" or "attach".
2451 Native targets are always singletons and instantiated early at GDB
2452 startup. */
2453 static target_ops *the_native_target;
2454
2455 /* See target.h. */
2456
2457 void
2458 set_native_target (target_ops *target)
2459 {
2460 if (the_native_target != NULL)
2461 internal_error (__FILE__, __LINE__,
2462 _("native target already set (\"%s\")."),
2463 the_native_target->longname ());
2464
2465 the_native_target = target;
2466 }
2467
2468 /* See target.h. */
2469
2470 target_ops *
2471 get_native_target ()
2472 {
2473 return the_native_target;
2474 }
2475
2476 /* Look through the list of possible targets for a target that can
2477 execute a run or attach command without any other data. This is
2478 used to locate the default process stratum.
2479
2480 If DO_MESG is not NULL, the result is always valid (error() is
2481 called for errors); else, return NULL on error. */
2482
2483 static struct target_ops *
2484 find_default_run_target (const char *do_mesg)
2485 {
2486 if (auto_connect_native_target && the_native_target != NULL)
2487 return the_native_target;
2488
2489 if (do_mesg != NULL)
2490 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2491 return NULL;
2492 }
2493
2494 /* See target.h. */
2495
2496 struct target_ops *
2497 find_attach_target (void)
2498 {
2499 /* If a target on the current stack can attach, use it. */
2500 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath)
2501 {
2502 if (t->can_attach ())
2503 return t;
2504 }
2505
2506 /* Otherwise, use the default run target for attaching. */
2507 return find_default_run_target ("attach");
2508 }
2509
2510 /* See target.h. */
2511
2512 struct target_ops *
2513 find_run_target (void)
2514 {
2515 /* If a target on the current stack can run, use it. */
2516 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath)
2517 {
2518 if (t->can_create_inferior ())
2519 return t;
2520 }
2521
2522 /* Otherwise, use the default run target. */
2523 return find_default_run_target ("run");
2524 }
2525
2526 bool
2527 target_ops::info_proc (const char *args, enum info_proc_what what)
2528 {
2529 return false;
2530 }
2531
2532 /* Implement the "info proc" command. */
2533
2534 int
2535 target_info_proc (const char *args, enum info_proc_what what)
2536 {
2537 struct target_ops *t;
2538
2539 /* If we're already connected to something that can get us OS
2540 related data, use it. Otherwise, try using the native
2541 target. */
2542 t = find_target_at (process_stratum);
2543 if (t == NULL)
2544 t = find_default_run_target (NULL);
2545
2546 for (; t != NULL; t = t->beneath)
2547 {
2548 if (t->info_proc (args, what))
2549 {
2550 if (targetdebug)
2551 fprintf_unfiltered (gdb_stdlog,
2552 "target_info_proc (\"%s\", %d)\n", args, what);
2553
2554 return 1;
2555 }
2556 }
2557
2558 return 0;
2559 }
2560
2561 static int
2562 find_default_supports_disable_randomization (struct target_ops *self)
2563 {
2564 struct target_ops *t;
2565
2566 t = find_default_run_target (NULL);
2567 if (t != NULL)
2568 return t->supports_disable_randomization ();
2569 return 0;
2570 }
2571
2572 int
2573 target_supports_disable_randomization (void)
2574 {
2575 return current_top_target ()->supports_disable_randomization ();
2576 }
2577
2578 /* See target/target.h. */
2579
2580 int
2581 target_supports_multi_process (void)
2582 {
2583 return current_top_target ()->supports_multi_process ();
2584 }
2585
2586 /* See target.h. */
2587
2588 gdb::optional<gdb::char_vector>
2589 target_get_osdata (const char *type)
2590 {
2591 struct target_ops *t;
2592
2593 /* If we're already connected to something that can get us OS
2594 related data, use it. Otherwise, try using the native
2595 target. */
2596 t = find_target_at (process_stratum);
2597 if (t == NULL)
2598 t = find_default_run_target ("get OS data");
2599
2600 if (!t)
2601 return {};
2602
2603 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2604 }
2605
2606 static struct address_space *
2607 default_thread_address_space (struct target_ops *self, ptid_t ptid)
2608 {
2609 struct inferior *inf;
2610
2611 /* Fall-back to the "main" address space of the inferior. */
2612 inf = find_inferior_ptid (ptid);
2613
2614 if (inf == NULL || inf->aspace == NULL)
2615 internal_error (__FILE__, __LINE__,
2616 _("Can't determine the current "
2617 "address space of thread %s\n"),
2618 target_pid_to_str (ptid));
2619
2620 return inf->aspace;
2621 }
2622
2623 /* Determine the current address space of thread PTID. */
2624
2625 struct address_space *
2626 target_thread_address_space (ptid_t ptid)
2627 {
2628 struct address_space *aspace;
2629
2630 aspace = current_top_target ()->thread_address_space (ptid);
2631 gdb_assert (aspace != NULL);
2632
2633 return aspace;
2634 }
2635
2636 void
2637 target_ops::close ()
2638 {
2639 }
2640
2641 bool
2642 target_ops::can_attach ()
2643 {
2644 return false;
2645 }
2646
2647 void
2648 target_ops::attach (const char *, int)
2649 {
2650 gdb_assert_not_reached ("target_ops::attach called");
2651 }
2652
2653 bool
2654 target_ops::can_create_inferior ()
2655 {
2656 return false;
2657 }
2658
2659 void
2660 target_ops::create_inferior (const char *, const std::string &,
2661 char **, int)
2662 {
2663 gdb_assert_not_reached ("target_ops::create_inferior called");
2664 }
2665
2666 bool
2667 target_ops::can_run ()
2668 {
2669 return false;
2670 }
2671
2672 int
2673 target_can_run ()
2674 {
2675 struct target_ops *t;
2676
2677 for (t = current_top_target (); t != NULL; t = t->beneath)
2678 {
2679 if (t->can_run ())
2680 return 1;
2681 }
2682
2683 return 0;
2684 }
2685
2686 /* Target file operations. */
2687
2688 static struct target_ops *
2689 default_fileio_target (void)
2690 {
2691 struct target_ops *t;
2692
2693 /* If we're already connected to something that can perform
2694 file I/O, use it. Otherwise, try using the native target. */
2695 t = find_target_at (process_stratum);
2696 if (t != NULL)
2697 return t;
2698 return find_default_run_target ("file I/O");
2699 }
2700
2701 /* File handle for target file operations. */
2702
2703 struct fileio_fh_t
2704 {
2705 /* The target on which this file is open.  NULL if the target was
2706 closed while this handle remained open.  */
2707 target_ops *target;
2708
2709 /* The file descriptor on the target. */
2710 int target_fd;
2711
2712 /* Check whether this fileio_fh_t represents a closed file. */
2713 bool is_closed ()
2714 {
2715 return target_fd < 0;
2716 }
2717 };
2718
2719 /* Vector of currently open file handles. The value returned by
2720 target_fileio_open and passed as the FD argument to other
2721 target_fileio_* functions is an index into this vector. This
2722 vector's entries are never freed; instead, files are marked as
2723 closed, and the handle becomes available for reuse. */
2724 static std::vector<fileio_fh_t> fileio_fhandles;
2725
2726 /* Index into fileio_fhandles of the lowest handle that might be
2727 closed. This permits handle reuse without searching the whole
2728 list each time a new file is opened. */
2729 static int lowest_closed_fd;
2730
2731 /* Invalidate the target associated with handles that are still open
2732 on target TARG, since we're about to close (and maybe destroy) the
2733 target. The handles remain open from the client's perspective, but
2734 trying to do anything with them other than closing them will fail
2735 with EIO. */
2736
2737 static void
2738 fileio_handles_invalidate_target (target_ops *targ)
2739 {
2740 for (fileio_fh_t &fh : fileio_fhandles)
2741 if (fh.target == targ)
2742 fh.target = NULL;
2743 }
2744
2745 /* Acquire a target fileio file descriptor. */
2746
2747 static int
2748 acquire_fileio_fd (target_ops *target, int target_fd)
2749 {
2750 /* Search for closed handles to reuse. */
2751 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
2752 {
2753 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
2754
2755 if (fh.is_closed ())
2756 break;
2757 }
2758
2759 /* Push a new handle if no closed handles were found. */
2760 if (lowest_closed_fd == fileio_fhandles.size ())
2761 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
2762 else
2763 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
2764
2765 /* Should no longer be marked closed. */
2766 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
2767
2768 /* Return its index, and start the next lookup at
2769 the next index. */
2770 return lowest_closed_fd++;
2771 }
2772
2773 /* Release a target fileio file descriptor. */
2774
2775 static void
2776 release_fileio_fd (int fd, fileio_fh_t *fh)
2777 {
2778 fh->target_fd = -1;
2779 lowest_closed_fd = std::min (lowest_closed_fd, fd);
2780 }
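
/* Illustration of the handle-reuse scheme above (a hypothetical sequence,
   assuming no handles are open yet):

     acquire_fileio_fd (t, 7);    returns 0; lowest_closed_fd becomes 1
     acquire_fileio_fd (t, 9);    returns 1; lowest_closed_fd becomes 2
     release_fileio_fd (0, fileio_fd_to_fh (0));
                                  slot 0 is marked closed and
                                  lowest_closed_fd drops back to 0
     acquire_fileio_fd (t, 11);   reuses slot 0 instead of growing the
                                  vector.  */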
2781
2782 /* Return a pointer to the fileio_fh_t corresponding to FD.  */
2783
2784 static fileio_fh_t *
2785 fileio_fd_to_fh (int fd)
2786 {
2787 return &fileio_fhandles[fd];
2788 }
2789
2790
2791 /* Default implementations of file i/o methods. We don't want these
2792 to delegate automatically, because we need to know which target
2793 supported the method, in order to call it directly from within
2794 pread/pwrite, etc. */
2795
2796 int
2797 target_ops::fileio_open (struct inferior *inf, const char *filename,
2798 int flags, int mode, int warn_if_slow,
2799 int *target_errno)
2800 {
2801 *target_errno = FILEIO_ENOSYS;
2802 return -1;
2803 }
2804
2805 int
2806 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2807 ULONGEST offset, int *target_errno)
2808 {
2809 *target_errno = FILEIO_ENOSYS;
2810 return -1;
2811 }
2812
2813 int
2814 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
2815 ULONGEST offset, int *target_errno)
2816 {
2817 *target_errno = FILEIO_ENOSYS;
2818 return -1;
2819 }
2820
2821 int
2822 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
2823 {
2824 *target_errno = FILEIO_ENOSYS;
2825 return -1;
2826 }
2827
2828 int
2829 target_ops::fileio_close (int fd, int *target_errno)
2830 {
2831 *target_errno = FILEIO_ENOSYS;
2832 return -1;
2833 }
2834
2835 int
2836 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
2837 int *target_errno)
2838 {
2839 *target_errno = FILEIO_ENOSYS;
2840 return -1;
2841 }
2842
2843 gdb::optional<std::string>
2844 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
2845 int *target_errno)
2846 {
2847 *target_errno = FILEIO_ENOSYS;
2848 return {};
2849 }
2850
2851 /* Helper for target_fileio_open and
2852 target_fileio_open_warn_if_slow. */
2853
2854 static int
2855 target_fileio_open_1 (struct inferior *inf, const char *filename,
2856 int flags, int mode, int warn_if_slow,
2857 int *target_errno)
2858 {
2859 struct target_ops *t;
2860
2861 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2862 {
2863 int fd = t->fileio_open (inf, filename, flags, mode,
2864 warn_if_slow, target_errno);
2865
2866 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
2867 continue;
2868
2869 if (fd < 0)
2870 fd = -1;
2871 else
2872 fd = acquire_fileio_fd (t, fd);
2873
2874 if (targetdebug)
2875 fprintf_unfiltered (gdb_stdlog,
2876 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
2877 " = %d (%d)\n",
2878 inf == NULL ? 0 : inf->num,
2879 filename, flags, mode,
2880 warn_if_slow, fd,
2881 fd != -1 ? 0 : *target_errno);
2882 return fd;
2883 }
2884
2885 *target_errno = FILEIO_ENOSYS;
2886 return -1;
2887 }
2888
2889 /* See target.h. */
2890
2891 int
2892 target_fileio_open (struct inferior *inf, const char *filename,
2893 int flags, int mode, int *target_errno)
2894 {
2895 return target_fileio_open_1 (inf, filename, flags, mode, 0,
2896 target_errno);
2897 }
2898
2899 /* See target.h. */
2900
2901 int
2902 target_fileio_open_warn_if_slow (struct inferior *inf,
2903 const char *filename,
2904 int flags, int mode, int *target_errno)
2905 {
2906 return target_fileio_open_1 (inf, filename, flags, mode, 1,
2907 target_errno);
2908 }
2909
2910 /* See target.h. */
2911
2912 int
2913 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2914 ULONGEST offset, int *target_errno)
2915 {
2916 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2917 int ret = -1;
2918
2919 if (fh->is_closed ())
2920 *target_errno = EBADF;
2921 else if (fh->target == NULL)
2922 *target_errno = EIO;
2923 else
2924 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
2925 len, offset, target_errno);
2926
2927 if (targetdebug)
2928 fprintf_unfiltered (gdb_stdlog,
2929 "target_fileio_pwrite (%d,...,%d,%s) "
2930 "= %d (%d)\n",
2931 fd, len, pulongest (offset),
2932 ret, ret != -1 ? 0 : *target_errno);
2933 return ret;
2934 }
2935
2936 /* See target.h. */
2937
2938 int
2939 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2940 ULONGEST offset, int *target_errno)
2941 {
2942 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2943 int ret = -1;
2944
2945 if (fh->is_closed ())
2946 *target_errno = EBADF;
2947 else if (fh->target == NULL)
2948 *target_errno = EIO;
2949 else
2950 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
2951 len, offset, target_errno);
2952
2953 if (targetdebug)
2954 fprintf_unfiltered (gdb_stdlog,
2955 "target_fileio_pread (%d,...,%d,%s) "
2956 "= %d (%d)\n",
2957 fd, len, pulongest (offset),
2958 ret, ret != -1 ? 0 : *target_errno);
2959 return ret;
2960 }
2961
2962 /* See target.h. */
2963
2964 int
2965 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
2966 {
2967 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2968 int ret = -1;
2969
2970 if (fh->is_closed ())
2971 *target_errno = EBADF;
2972 else if (fh->target == NULL)
2973 *target_errno = EIO;
2974 else
2975 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
2976
2977 if (targetdebug)
2978 fprintf_unfiltered (gdb_stdlog,
2979 "target_fileio_fstat (%d) = %d (%d)\n",
2980 fd, ret, ret != -1 ? 0 : *target_errno);
2981 return ret;
2982 }
2983
2984 /* See target.h. */
2985
2986 int
2987 target_fileio_close (int fd, int *target_errno)
2988 {
2989 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2990 int ret = -1;
2991
2992 if (fh->is_closed ())
2993 *target_errno = EBADF;
2994 else
2995 {
2996 if (fh->target != NULL)
2997 ret = fh->target->fileio_close (fh->target_fd,
2998 target_errno);
2999 else
3000 ret = 0;
3001 release_fileio_fd (fd, fh);
3002 }
3003
3004 if (targetdebug)
3005 fprintf_unfiltered (gdb_stdlog,
3006 "target_fileio_close (%d) = %d (%d)\n",
3007 fd, ret, ret != -1 ? 0 : *target_errno);
3008 return ret;
3009 }
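
/* Usage sketch tying the calls above together (hypothetical caller; the
   filename is only an example).  A failed call leaves the fileio error
   code in TARGET_ERRNO:

     int target_errno;
     int fd = target_fileio_open (NULL, "/etc/hostname", FILEIO_O_RDONLY,
                                  0, &target_errno);
     if (fd != -1)
       {
         gdb_byte buf[128];
         int n = target_fileio_pread (fd, buf, sizeof buf, 0, &target_errno);
         /* ... use the N bytes read ...  */
         target_fileio_close (fd, &target_errno);
       }
*/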
3010
3011 /* See target.h. */
3012
3013 int
3014 target_fileio_unlink (struct inferior *inf, const char *filename,
3015 int *target_errno)
3016 {
3017 struct target_ops *t;
3018
3019 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3020 {
3021 int ret = t->fileio_unlink (inf, filename, target_errno);
3022
3023 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3024 continue;
3025
3026 if (targetdebug)
3027 fprintf_unfiltered (gdb_stdlog,
3028 "target_fileio_unlink (%d,%s)"
3029 " = %d (%d)\n",
3030 inf == NULL ? 0 : inf->num, filename,
3031 ret, ret != -1 ? 0 : *target_errno);
3032 return ret;
3033 }
3034
3035 *target_errno = FILEIO_ENOSYS;
3036 return -1;
3037 }
3038
3039 /* See target.h. */
3040
3041 gdb::optional<std::string>
3042 target_fileio_readlink (struct inferior *inf, const char *filename,
3043 int *target_errno)
3044 {
3045 struct target_ops *t;
3046
3047 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3048 {
3049 gdb::optional<std::string> ret
3050 = t->fileio_readlink (inf, filename, target_errno);
3051
3052 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3053 continue;
3054
3055 if (targetdebug)
3056 fprintf_unfiltered (gdb_stdlog,
3057 "target_fileio_readlink (%d,%s)"
3058 " = %s (%d)\n",
3059 inf == NULL ? 0 : inf->num,
3060 filename, ret ? ret->c_str () : "(nil)",
3061 ret ? 0 : *target_errno);
3062 return ret;
3063 }
3064
3065 *target_errno = FILEIO_ENOSYS;
3066 return {};
3067 }
3068
3069 /* Like scoped_fd, but specific to target fileio. */
3070
3071 class scoped_target_fd
3072 {
3073 public:
3074 explicit scoped_target_fd (int fd) noexcept
3075 : m_fd (fd)
3076 {
3077 }
3078
3079 ~scoped_target_fd ()
3080 {
3081 if (m_fd >= 0)
3082 {
3083 int target_errno;
3084
3085 target_fileio_close (m_fd, &target_errno);
3086 }
3087 }
3088
3089 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3090
3091 int get () const noexcept
3092 {
3093 return m_fd;
3094 }
3095
3096 private:
3097 int m_fd;
3098 };
3099
3100 /* Read target file FILENAME, in the filesystem as seen by INF. If
3101 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3102 remote targets, the remote stub). Store the result in *BUF_P and
3103 return the size of the transferred data. PADDING additional bytes
3104 are available in *BUF_P. This is a helper function for
3105 target_fileio_read_alloc; see the declaration of that function for
3106 more information. */
3107
3108 static LONGEST
3109 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3110 gdb_byte **buf_p, int padding)
3111 {
3112 size_t buf_alloc, buf_pos;
3113 gdb_byte *buf;
3114 LONGEST n;
3115 int target_errno;
3116
3117 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3118 0700, &target_errno));
3119 if (fd.get () == -1)
3120 return -1;
3121
3122 /* Start by reading up to 4K at a time. The target will throttle
3123 this number down if necessary. */
3124 buf_alloc = 4096;
3125 buf = (gdb_byte *) xmalloc (buf_alloc);
3126 buf_pos = 0;
3127 while (1)
3128 {
3129 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3130 buf_alloc - buf_pos - padding, buf_pos,
3131 &target_errno);
3132 if (n < 0)
3133 {
3134 /* An error occurred. */
3135 xfree (buf);
3136 return -1;
3137 }
3138 else if (n == 0)
3139 {
3140 /* Read all there was. */
3141 if (buf_pos == 0)
3142 xfree (buf);
3143 else
3144 *buf_p = buf;
3145 return buf_pos;
3146 }
3147
3148 buf_pos += n;
3149
3150 /* If the buffer is filling up, expand it. */
3151 if (buf_alloc < buf_pos * 2)
3152 {
3153 buf_alloc *= 2;
3154 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3155 }
3156
3157 QUIT;
3158 }
3159 }
3160
3161 /* See target.h. */
3162
3163 LONGEST
3164 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3165 gdb_byte **buf_p)
3166 {
3167 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3168 }
3169
3170 /* See target.h. */
3171
3172 gdb::unique_xmalloc_ptr<char>
3173 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3174 {
3175 gdb_byte *buffer;
3176 char *bufstr;
3177 LONGEST i, transferred;
3178
3179 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3180 bufstr = (char *) buffer;
3181
3182 if (transferred < 0)
3183 return gdb::unique_xmalloc_ptr<char> (nullptr);
3184
3185 if (transferred == 0)
3186 return gdb::unique_xmalloc_ptr<char> (xstrdup (""));
3187
3188 bufstr[transferred] = 0;
3189
3190 /* Check for embedded NUL bytes; but allow trailing NULs. */
3191 for (i = strlen (bufstr); i < transferred; i++)
3192 if (bufstr[i] != 0)
3193 {
3194 warning (_("target file %s "
3195 "contained unexpected null characters"),
3196 filename);
3197 break;
3198 }
3199
3200 return gdb::unique_xmalloc_ptr<char> (bufstr);
3201 }
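
/* Usage sketch (hypothetical caller; the filename is only an example):
   read a whole target file as a NUL-terminated string, using INF's view
   of the filesystem.

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/proc/version");
     if (text != nullptr)
       printf_unfiltered ("%s\n", text.get ());
*/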
3202
3203
3204 static int
3205 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3206 CORE_ADDR addr, int len)
3207 {
3208 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3209 }
3210
3211 static int
3212 default_watchpoint_addr_within_range (struct target_ops *target,
3213 CORE_ADDR addr,
3214 CORE_ADDR start, int length)
3215 {
3216 return addr >= start && addr < start + length;
3217 }
3218
3219 static struct gdbarch *
3220 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3221 {
3222 inferior *inf = find_inferior_ptid (ptid);
3223 gdb_assert (inf != NULL);
3224 return inf->gdbarch;
3225 }
3226
3227 /* Find the next target down the stack from the specified target.  */
3230
3231 struct target_ops *
3232 find_target_beneath (struct target_ops *t)
3233 {
3234 return t->beneath;
3235 }
3236
3237 /* See target.h. */
3238
3239 struct target_ops *
3240 find_target_at (enum strata stratum)
3241 {
3242 struct target_ops *t;
3243
3244 for (t = current_top_target (); t != NULL; t = t->beneath)
3245 if (t->to_stratum == stratum)
3246 return t;
3247
3248 return NULL;
3249 }
3250
3251 \f
3252
3253 /* See target.h.  */
3254
3255 void
3256 target_announce_detach (int from_tty)
3257 {
3258 pid_t pid;
3259 const char *exec_file;
3260
3261 if (!from_tty)
3262 return;
3263
3264 exec_file = get_exec_file (0);
3265 if (exec_file == NULL)
3266 exec_file = "";
3267
3268 pid = ptid_get_pid (inferior_ptid);
3269 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3270 target_pid_to_str (pid_to_ptid (pid)));
3271 gdb_flush (gdb_stdout);
3272 }
3273
3274 /* The inferior process has died. Long live the inferior! */
3275
3276 void
3277 generic_mourn_inferior (void)
3278 {
3279 ptid_t ptid;
3280
3281 ptid = inferior_ptid;
3282 inferior_ptid = null_ptid;
3283
3284 /* Mark breakpoints uninserted in case something tries to delete a
3285 breakpoint while we delete the inferior's threads (which would
3286 fail, since the inferior is long gone). */
3287 mark_breakpoints_out ();
3288
3289 if (!ptid_equal (ptid, null_ptid))
3290 {
3291 int pid = ptid_get_pid (ptid);
3292 exit_inferior (pid);
3293 }
3294
3295 /* Note this wipes step-resume breakpoints, so needs to be done
3296 after exit_inferior, which ends up referencing the step-resume
3297 breakpoints through clear_thread_inferior_resources. */
3298 breakpoint_init_inferior (inf_exited);
3299
3300 registers_changed ();
3301
3302 reopen_exec_file ();
3303 reinit_frame_cache ();
3304
3305 if (deprecated_detach_hook)
3306 deprecated_detach_hook ();
3307 }
3308 \f
3309 /* Convert a normal process ID to a string. Returns the string in a
3310 static buffer. */
3311
3312 const char *
3313 normal_pid_to_str (ptid_t ptid)
3314 {
3315 static char buf[32];
3316
3317 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3318 return buf;
3319 }
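
/* For example, normal_pid_to_str (pid_to_ptid (1234)) returns the string
   "process 1234"; the result is only valid until the next call, since
   the buffer is static.  */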
3320
3321 static const char *
3322 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3323 {
3324 return normal_pid_to_str (ptid);
3325 }
3326
3327 /* Error-catcher for target_find_memory_regions. */
3328 static int
3329 dummy_find_memory_regions (struct target_ops *self,
3330 find_memory_region_ftype ignore1, void *ignore2)
3331 {
3332 error (_("Command not implemented for this target."));
3333 return 0;
3334 }
3335
3336 /* Error-catcher for target_make_corefile_notes. */
3337 static char *
3338 dummy_make_corefile_notes (struct target_ops *self,
3339 bfd *ignore1, int *ignore2)
3340 {
3341 error (_("Command not implemented for this target."));
3342 return NULL;
3343 }
3344
3345 #include "target-delegates.c"
3346
3347
3348 static const target_info dummy_target_info = {
3349 "None",
3350 N_("None"),
3351 ""
3352 };
3353
3354 dummy_target::dummy_target ()
3355 {
3356 to_stratum = dummy_stratum;
3357 }
3358
3359 debug_target::debug_target ()
3360 {
3361 to_stratum = debug_stratum;
3362 }
3363
3364 const target_info &
3365 dummy_target::info () const
3366 {
3367 return dummy_target_info;
3368 }
3369
3370 const target_info &
3371 debug_target::info () const
3372 {
3373 return beneath->info ();
3374 }
3375
3376 \f
3377
3378 void
3379 target_close (struct target_ops *targ)
3380 {
3381 gdb_assert (!target_is_pushed (targ));
3382
3383 fileio_handles_invalidate_target (targ);
3384
3385 targ->close ();
3386
3387 if (targetdebug)
3388 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3389 }
3390
3391 int
3392 target_thread_alive (ptid_t ptid)
3393 {
3394 return current_top_target ()->thread_alive (ptid);
3395 }
3396
3397 void
3398 target_update_thread_list (void)
3399 {
3400 current_top_target ()->update_thread_list ();
3401 }
3402
3403 void
3404 target_stop (ptid_t ptid)
3405 {
3406 if (!may_stop)
3407 {
3408 warning (_("May not interrupt or stop the target, ignoring attempt"));
3409 return;
3410 }
3411
3412 current_top_target ()->stop (ptid);
3413 }
3414
3415 void
3416 target_interrupt ()
3417 {
3418 if (!may_stop)
3419 {
3420 warning (_("May not interrupt or stop the target, ignoring attempt"));
3421 return;
3422 }
3423
3424 current_top_target ()->interrupt ();
3425 }
3426
3427 /* See target.h. */
3428
3429 void
3430 target_pass_ctrlc (void)
3431 {
3432 current_top_target ()->pass_ctrlc ();
3433 }
3434
3435 /* See target.h. */
3436
3437 void
3438 default_target_pass_ctrlc (struct target_ops *ops)
3439 {
3440 target_interrupt ();
3441 }
3442
3443 /* See target/target.h. */
3444
3445 void
3446 target_stop_and_wait (ptid_t ptid)
3447 {
3448 struct target_waitstatus status;
3449 int was_non_stop = non_stop;
3450
3451 non_stop = 1;
3452 target_stop (ptid);
3453
3454 memset (&status, 0, sizeof (status));
3455 target_wait (ptid, &status, 0);
3456
3457 non_stop = was_non_stop;
3458 }
3459
3460 /* See target/target.h. */
3461
3462 void
3463 target_continue_no_signal (ptid_t ptid)
3464 {
3465 target_resume (ptid, 0, GDB_SIGNAL_0);
3466 }
3467
3468 /* See target/target.h. */
3469
3470 void
3471 target_continue (ptid_t ptid, enum gdb_signal signal)
3472 {
3473 target_resume (ptid, 0, signal);
3474 }
3475
3476 /* Append ELEM to LIST, a comma-separated list, and return the
3477 result. The LIST incoming argument is released. */
3478
3479 static char *
3480 str_comma_list_concat_elem (char *list, const char *elem)
3481 {
3482 if (list == NULL)
3483 return xstrdup (elem);
3484 else
3485 return reconcat (list, list, ", ", elem, (char *) NULL);
3486 }
3487
3488 /* Helper for target_options_to_string. If OPT is present in
3489 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3490 Returns the new resulting string. OPT is removed from
3491 TARGET_OPTIONS. */
3492
3493 static char *
3494 do_option (int *target_options, char *ret,
3495 int opt, const char *opt_str)
3496 {
3497 if ((*target_options & opt) != 0)
3498 {
3499 ret = str_comma_list_concat_elem (ret, opt_str);
3500 *target_options &= ~opt;
3501 }
3502
3503 return ret;
3504 }
3505
3506 char *
3507 target_options_to_string (int target_options)
3508 {
3509 char *ret = NULL;
3510
3511 #define DO_TARG_OPTION(OPT) \
3512 ret = do_option (&target_options, ret, OPT, #OPT)
3513
3514 DO_TARG_OPTION (TARGET_WNOHANG);
3515
3516 if (target_options != 0)
3517 ret = str_comma_list_concat_elem (ret, "unknown???");
3518
3519 if (ret == NULL)
3520 ret = xstrdup ("");
3521 return ret;
3522 }
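
/* For example (illustrative only), target_options_to_string (TARGET_WNOHANG)
   returns "TARGET_WNOHANG", and an option bit this function does not know
   about is rendered as "unknown???".  The returned string is xmalloc'd and
   owned by the caller.  */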
3523
3524 void
3525 target_fetch_registers (struct regcache *regcache, int regno)
3526 {
3527 current_top_target ()->fetch_registers (regcache, regno);
3528 if (targetdebug)
3529 regcache->debug_print_register ("target_fetch_registers", regno);
3530 }
3531
3532 void
3533 target_store_registers (struct regcache *regcache, int regno)
3534 {
3535 if (!may_write_registers)
3536 error (_("Writing to registers is not allowed (regno %d)"), regno);
3537
3538 current_top_target ()->store_registers (regcache, regno);
3539 if (targetdebug)
3540 {
3541 regcache->debug_print_register ("target_store_registers", regno);
3542 }
3543 }
3544
3545 int
3546 target_core_of_thread (ptid_t ptid)
3547 {
3548 return current_top_target ()->core_of_thread (ptid);
3549 }
3550
3551 int
3552 simple_verify_memory (struct target_ops *ops,
3553 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3554 {
3555 LONGEST total_xfered = 0;
3556
3557 while (total_xfered < size)
3558 {
3559 ULONGEST xfered_len;
3560 enum target_xfer_status status;
3561 gdb_byte buf[1024];
3562 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3563
3564 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3565 buf, NULL, lma + total_xfered, howmuch,
3566 &xfered_len);
3567 if (status == TARGET_XFER_OK
3568 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3569 {
3570 total_xfered += xfered_len;
3571 QUIT;
3572 }
3573 else
3574 return 0;
3575 }
3576 return 1;
3577 }
3578
3579 /* Default implementation of memory verification. */
3580
3581 static int
3582 default_verify_memory (struct target_ops *self,
3583 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3584 {
3585 /* Start over from the top of the target stack. */
3586 return simple_verify_memory (current_top_target (),
3587 data, memaddr, size);
3588 }
3589
3590 int
3591 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3592 {
3593 return current_top_target ()->verify_memory (data, memaddr, size);
3594 }
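
/* Usage sketch (hypothetical caller; DATA, LMA and SIZE would come from,
   e.g., a BFD section being compared against target memory):

     if (target_verify_memory (data, lma, size))
       ;  /* Target memory matches DATA.  */
     else
       ;  /* Mismatch, or the comparison could not be completed.  */
*/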
3595
3596 /* The documentation for this function is in its prototype declaration in
3597 target.h. */
3598
3599 int
3600 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3601 enum target_hw_bp_type rw)
3602 {
3603 return current_top_target ()->insert_mask_watchpoint (addr, mask, rw);
3604 }
3605
3606 /* The documentation for this function is in its prototype declaration in
3607 target.h. */
3608
3609 int
3610 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3611 enum target_hw_bp_type rw)
3612 {
3613 return current_top_target ()->remove_mask_watchpoint (addr, mask, rw);
3614 }
3615
3616 /* The documentation for this function is in its prototype declaration
3617 in target.h. */
3618
3619 int
3620 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3621 {
3622 return current_top_target ()->masked_watch_num_registers (addr, mask);
3623 }
3624
3625 /* The documentation for this function is in its prototype declaration
3626 in target.h. */
3627
3628 int
3629 target_ranged_break_num_registers (void)
3630 {
3631 return current_top_target ()->ranged_break_num_registers ();
3632 }
3633
3634 /* See target.h. */
3635
3636 struct btrace_target_info *
3637 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
3638 {
3639 return current_top_target ()->enable_btrace (ptid, conf);
3640 }
3641
3642 /* See target.h. */
3643
3644 void
3645 target_disable_btrace (struct btrace_target_info *btinfo)
3646 {
3647 current_top_target ()->disable_btrace (btinfo);
3648 }
3649
3650 /* See target.h. */
3651
3652 void
3653 target_teardown_btrace (struct btrace_target_info *btinfo)
3654 {
3655 current_top_target ()->teardown_btrace (btinfo);
3656 }
3657
3658 /* See target.h. */
3659
3660 enum btrace_error
3661 target_read_btrace (struct btrace_data *btrace,
3662 struct btrace_target_info *btinfo,
3663 enum btrace_read_type type)
3664 {
3665 return current_top_target ()->read_btrace (btrace, btinfo, type);
3666 }
3667
3668 /* See target.h. */
3669
3670 const struct btrace_config *
3671 target_btrace_conf (const struct btrace_target_info *btinfo)
3672 {
3673 return current_top_target ()->btrace_conf (btinfo);
3674 }
3675
3676 /* See target.h. */
3677
3678 void
3679 target_stop_recording (void)
3680 {
3681 current_top_target ()->stop_recording ();
3682 }
3683
3684 /* See target.h. */
3685
3686 void
3687 target_save_record (const char *filename)
3688 {
3689 current_top_target ()->save_record (filename);
3690 }
3691
3692 /* See target.h. */
3693
3694 int
3695 target_supports_delete_record ()
3696 {
3697 return current_top_target ()->supports_delete_record ();
3698 }
3699
3700 /* See target.h. */
3701
3702 void
3703 target_delete_record (void)
3704 {
3705 current_top_target ()->delete_record ();
3706 }
3707
3708 /* See target.h. */
3709
3710 enum record_method
3711 target_record_method (ptid_t ptid)
3712 {
3713 return current_top_target ()->record_method (ptid);
3714 }
3715
3716 /* See target.h. */
3717
3718 int
3719 target_record_is_replaying (ptid_t ptid)
3720 {
3721 return current_top_target ()->record_is_replaying (ptid);
3722 }
3723
3724 /* See target.h. */
3725
3726 int
3727 target_record_will_replay (ptid_t ptid, int dir)
3728 {
3729 return current_top_target ()->record_will_replay (ptid, dir);
3730 }
3731
3732 /* See target.h. */
3733
3734 void
3735 target_record_stop_replaying (void)
3736 {
3737 current_top_target ()->record_stop_replaying ();
3738 }
3739
3740 /* See target.h. */
3741
3742 void
3743 target_goto_record_begin (void)
3744 {
3745 current_top_target ()->goto_record_begin ();
3746 }
3747
3748 /* See target.h. */
3749
3750 void
3751 target_goto_record_end (void)
3752 {
3753 current_top_target ()->goto_record_end ();
3754 }
3755
3756 /* See target.h. */
3757
3758 void
3759 target_goto_record (ULONGEST insn)
3760 {
3761 current_top_target ()->goto_record (insn);
3762 }
3763
3764 /* See target.h. */
3765
3766 void
3767 target_insn_history (int size, gdb_disassembly_flags flags)
3768 {
3769 current_top_target ()->insn_history (size, flags);
3770 }
3771
3772 /* See target.h. */
3773
3774 void
3775 target_insn_history_from (ULONGEST from, int size,
3776 gdb_disassembly_flags flags)
3777 {
3778 current_top_target ()->insn_history_from (from, size, flags);
3779 }
3780
3781 /* See target.h. */
3782
3783 void
3784 target_insn_history_range (ULONGEST begin, ULONGEST end,
3785 gdb_disassembly_flags flags)
3786 {
3787 current_top_target ()->insn_history_range (begin, end, flags);
3788 }
3789
3790 /* See target.h. */
3791
3792 void
3793 target_call_history (int size, record_print_flags flags)
3794 {
3795 current_top_target ()->call_history (size, flags);
3796 }
3797
3798 /* See target.h. */
3799
3800 void
3801 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
3802 {
3803 current_top_target ()->call_history_from (begin, size, flags);
3804 }
3805
3806 /* See target.h. */
3807
3808 void
3809 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
3810 {
3811 current_top_target ()->call_history_range (begin, end, flags);
3812 }
3813
3814 /* See target.h. */
3815
3816 const struct frame_unwind *
3817 target_get_unwinder (void)
3818 {
3819 return current_top_target ()->get_unwinder ();
3820 }
3821
3822 /* See target.h. */
3823
3824 const struct frame_unwind *
3825 target_get_tailcall_unwinder (void)
3826 {
3827 return current_top_target ()->get_tailcall_unwinder ();
3828 }
3829
3830 /* See target.h. */
3831
3832 void
3833 target_prepare_to_generate_core (void)
3834 {
3835 current_top_target ()->prepare_to_generate_core ();
3836 }
3837
3838 /* See target.h. */
3839
3840 void
3841 target_done_generating_core (void)
3842 {
3843 current_top_target ()->done_generating_core ();
3844 }
3845
3846 \f
3847
3848 static char targ_desc[] =
3849 "Names of targets and files being debugged.\nShows the entire \
3850 stack of targets currently in use (including the exec-file,\n\
3851 core-file, and process, if any), as well as the symbol file name.";
3852
3853 static void
3854 default_rcmd (struct target_ops *self, const char *command,
3855 struct ui_file *output)
3856 {
3857 error (_("\"monitor\" command not supported by this target."));
3858 }
3859
3860 static void
3861 do_monitor_command (const char *cmd, int from_tty)
3862 {
3863 target_rcmd (cmd, gdb_stdtarg);
3864 }
3865
3866 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
3867 ignored. */
3868
3869 void
3870 flash_erase_command (const char *cmd, int from_tty)
3871 {
3872 /* True if any flash region was erased; used to tell the target when the flash operations are done.  */
3873 bool found_flash_region = false;
3874 struct gdbarch *gdbarch = target_gdbarch ();
3875
3876 std::vector<mem_region> mem_regions = target_memory_map ();
3877
3878 /* Iterate over all memory regions. */
3879 for (const mem_region &m : mem_regions)
3880 {
3881 /* Is this a flash memory region? */
3882 if (m.attrib.mode == MEM_FLASH)
3883 {
3884 found_flash_region = true;
3885 target_flash_erase (m.lo, m.hi - m.lo);
3886
3887 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
3888
3889 current_uiout->message (_("Erasing flash memory region at address "));
3890 current_uiout->field_fmt ("address", "%s", paddress (gdbarch, m.lo));
3891 current_uiout->message (", size = ");
3892 current_uiout->field_fmt ("size", "%s", hex_string (m.hi - m.lo));
3893 current_uiout->message ("\n");
3894 }
3895 }
3896
3897 /* Did we do any flash operations? If so, we need to finalize them. */
3898 if (found_flash_region)
3899 target_flash_done ();
3900 else
3901 current_uiout->message (_("No flash memory regions found.\n"));
3902 }
3903
3904 /* Print the name of each layer of our target stack.  */
3905
3906 static void
3907 maintenance_print_target_stack (const char *cmd, int from_tty)
3908 {
3909 struct target_ops *t;
3910
3911 printf_filtered (_("The current target stack is:\n"));
3912
3913 for (t = current_top_target (); t != NULL; t = t->beneath)
3914 {
3915 if (t->to_stratum == debug_stratum)
3916 continue;
3917 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
3918 }
3919 }
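
/* Sample output of "maint print target-stack" (illustrative; the exact
   targets shown depend on what is currently pushed):

     The current target stack is:
       - native (Native process)
       - exec (Local exec file)
       - None (None)
*/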
3920
3921 /* See target.h. */
3922
3923 void
3924 target_async (int enable)
3925 {
3926 infrun_async (enable);
3927 current_top_target ()->async (enable);
3928 }
3929
3930 /* See target.h. */
3931
3932 void
3933 target_thread_events (int enable)
3934 {
3935 current_top_target ()->thread_events (enable);
3936 }
3937
3938 /* Controls whether targets can report that they can/are async.  This is
3939 just for maintainers to use when debugging gdb. */
3940 int target_async_permitted = 1;
3941
3942 /* The set command writes to this variable. If the inferior is
3943 executing, target_async_permitted is *not* updated. */
3944 static int target_async_permitted_1 = 1;
3945
3946 static void
3947 maint_set_target_async_command (const char *args, int from_tty,
3948 struct cmd_list_element *c)
3949 {
3950 if (have_live_inferiors ())
3951 {
3952 target_async_permitted_1 = target_async_permitted;
3953 error (_("Cannot change this setting while the inferior is running."));
3954 }
3955
3956 target_async_permitted = target_async_permitted_1;
3957 }
3958
3959 static void
3960 maint_show_target_async_command (struct ui_file *file, int from_tty,
3961 struct cmd_list_element *c,
3962 const char *value)
3963 {
3964 fprintf_filtered (file,
3965 _("Controlling the inferior in "
3966 "asynchronous mode is %s.\n"), value);
3967 }
3968
3969 /* Return true if the target operates in non-stop mode even with "set
3970 non-stop off". */
3971
3972 static int
3973 target_always_non_stop_p (void)
3974 {
3975 return current_top_target ()->always_non_stop_p ();
3976 }
3977
3978 /* See target.h. */
3979
3980 int
3981 target_is_non_stop_p (void)
3982 {
3983 return (non_stop
3984 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
3985 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
3986 && target_always_non_stop_p ()));
3987 }
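
/* Worked example of the predicate above: with "set non-stop off" and the
   default "maint set target-non-stop auto", the result is nonzero only
   when the current target always runs in non-stop mode; forcing
   "maint set target-non-stop on" makes it nonzero regardless of the
   target.  */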
3988
3989 /* Controls whether targets can report that they always run in non-stop
3990 mode. This is just for maintainers to use when debugging gdb. */
3991 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
3992
3993 /* The set command writes to this variable. If the inferior is
3994 executing, target_non_stop_enabled is *not* updated. */
3995 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
3996
3997 /* Implementation of "maint set target-non-stop". */
3998
3999 static void
4000 maint_set_target_non_stop_command (const char *args, int from_tty,
4001 struct cmd_list_element *c)
4002 {
4003 if (have_live_inferiors ())
4004 {
4005 target_non_stop_enabled_1 = target_non_stop_enabled;
4006 error (_("Cannot change this setting while the inferior is running."));
4007 }
4008
4009 target_non_stop_enabled = target_non_stop_enabled_1;
4010 }
4011
4012 /* Implementation of "maint show target-non-stop". */
4013
4014 static void
4015 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4016 struct cmd_list_element *c,
4017 const char *value)
4018 {
4019 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4020 fprintf_filtered (file,
4021 _("Whether the target is always in non-stop mode "
4022 "is %s (currently %s).\n"), value,
4023 target_always_non_stop_p () ? "on" : "off");
4024 else
4025 fprintf_filtered (file,
4026 _("Whether the target is always in non-stop mode "
4027 "is %s.\n"), value);
4028 }
4029
4030 /* Temporary copies of permission settings. */
4031
4032 static int may_write_registers_1 = 1;
4033 static int may_write_memory_1 = 1;
4034 static int may_insert_breakpoints_1 = 1;
4035 static int may_insert_tracepoints_1 = 1;
4036 static int may_insert_fast_tracepoints_1 = 1;
4037 static int may_stop_1 = 1;
4038
4039 /* Make the user-set values match the real values again. */
4040
4041 void
4042 update_target_permissions (void)
4043 {
4044 may_write_registers_1 = may_write_registers;
4045 may_write_memory_1 = may_write_memory;
4046 may_insert_breakpoints_1 = may_insert_breakpoints;
4047 may_insert_tracepoints_1 = may_insert_tracepoints;
4048 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4049 may_stop_1 = may_stop;
4050 }
4051
4052 /* This one function handles (most of) the permission flags in the same
4053 way. */
4054
4055 static void
4056 set_target_permissions (const char *args, int from_tty,
4057 struct cmd_list_element *c)
4058 {
4059 if (target_has_execution)
4060 {
4061 update_target_permissions ();
4062 error (_("Cannot change this setting while the inferior is running."));
4063 }
4064
4065 /* Make the real values match the user-changed values. */
4066 may_write_registers = may_write_registers_1;
4067 may_insert_breakpoints = may_insert_breakpoints_1;
4068 may_insert_tracepoints = may_insert_tracepoints_1;
4069 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4070 may_stop = may_stop_1;
4071 update_observer_mode ();
4072 }

/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (const char *args, int from_tty,
                             struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
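
/* Editorial note (not part of the original source): unlike
   set_target_permissions above, this hook does not reject the change
   while the inferior is running; it simply commits the new value and
   recomputes observer mode, so "set may-write-memory" can be toggled
   at any time.  */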

void
initialize_targets (void)
{
  the_dummy_target = new dummy_target ();
  push_target (the_dummy_target);

  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose."),
                             set_targetdebug,
                             show_targetdebug,
                             &setdebuglist, &showdebuglist);
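
  /* Editorial illustration (not part of the original source): because
     the command above is registered on setdebuglist/showdebuglist under
     the name "target", it is reached from the CLI as, e.g.

         (gdb) set debug target 1
         (gdb) show debug target

     with larger values producing more verbose logging, as the help
     text says.  */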

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
                           &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
                           NULL,
                           show_trust_readonly,
                           &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
           _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
                           &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
                           maint_set_target_async_command,
                           maint_show_target_async_command,
                           &maintenance_set_cmdlist,
                           &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
                                &target_non_stop_enabled_1, _("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
                                maint_set_target_non_stop_command,
                                maint_show_target_non_stop_command,
                                &maintenance_set_cmdlist,
                                &maintenance_show_cmdlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
                           &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
                           &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_write_memory_permission, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
                           &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
                           &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
                           &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
                           &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
           _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
                           &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
                           NULL, show_auto_connect_native_target,
                           &setlist, &showlist);
}
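
/* Editorial illustration (not part of the original source): the two
   maintenance settings registered above are driven from the CLI as,
   e.g.

       (gdb) maint set target-async off
       (gdb) maint show target-async
       Controlling the inferior in asynchronous mode is off.

       (gdb) maint set target-non-stop auto
       (gdb) maint show target-non-stop

   The first "show" line reuses the format string from
   maint_show_target_async_command; the target-non-stop output follows
   maint_show_target_non_stop_command and additionally reports the
   target's own capability when the setting is "auto".  */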