Add command support for Guile.
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL  /* Terminator for the add_setshow_enum_cmd list.  */
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
74
75
76 /* Update the branch trace for the current thread and return a pointer to its
77 thread_info.
78
79 Throws an error if there is no thread or no trace. This function never
80 returns NULL. */
81
82 static struct thread_info *
83 require_btrace_thread (void)
84 {
85 struct thread_info *tp;
86
87 DEBUG ("require");
88
89 tp = find_thread_ptid (inferior_ptid);
90 if (tp == NULL)
91 error (_("No thread."));
92
93 btrace_fetch (tp);
94
95 if (btrace_is_empty (tp))
96 error (_("No trace."));
97
98 return tp;
99 }
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 branch trace information struct.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct btrace_thread_info *
108 require_btrace (void)
109 {
110 struct thread_info *tp;
111
112 tp = require_btrace_thread ();
113
114 return &tp->btrace;
115 }
116
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; TRY_CATCH downgrades any
   error thrown by btrace_enable to a warning so it does not propagate
   into the observer machinery.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  /* TRY_CATCH runs the following statement and records any thrown
     error in ERROR instead of unwinding.  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
130
/* Cleanup callback disabling branch tracing for one thread.  ARG is the
   thread_info of the thread to disable.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
142
/* Enable automatic tracing of new threads by attaching a new-thread
   observer.  The observer handle is kept so it can be detached again
   in record_btrace_auto_disable.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
153
154 /* Disable automatic tracing of new threads. */
155
156 static void
157 record_btrace_auto_disable (void)
158 {
159 /* The observer may have been detached, already. */
160 if (record_btrace_thread_observer == NULL)
161 return;
162
163 DEBUG ("detach thread observer");
164
165 observer_detach_new_thread (record_btrace_thread_observer);
166 record_btrace_thread_observer = NULL;
167 }
168
/* The to_open method of target record-btrace.

   ARGS may contain a thread number list; tracing is then enabled for
   the listed threads only.  If ARGS is NULL or empty, tracing is
   enabled for all threads.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  /* Recording requires a running inferior on a btrace-capable target;
     non-stop mode is not supported.  */
  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* If enabling fails part-way through, the cleanup chain disables
     tracing again for every thread already enabled.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (), 1);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
209
210 /* The to_stop_recording method of target record-btrace. */
211
212 static void
213 record_btrace_stop_recording (struct target_ops *self)
214 {
215 struct thread_info *tp;
216
217 DEBUG ("stop recording");
218
219 record_btrace_auto_disable ();
220
221 ALL_THREADS (tp)
222 if (tp->btrace.target != NULL)
223 btrace_disable (tp);
224 }
225
226 /* The to_close method of target record-btrace. */
227
228 static void
229 record_btrace_close (struct target_ops *self)
230 {
231 struct thread_info *tp;
232
233 /* Make sure automatic recording gets disabled even if we did not stop
234 recording before closing the record-btrace target. */
235 record_btrace_auto_disable ();
236
237 /* We should have already stopped recording.
238 Tear down btrace in case we have not. */
239 ALL_THREADS (tp)
240 btrace_teardown (tp);
241 }
242
/* The to_info_record method of target record-btrace.

   Prints the number of recorded instructions and function segments for
   the current thread and, while replaying, the replay position.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Refresh the trace before counting.  Unlike require_btrace_thread,
     an empty trace is not an error here; we simply report zero.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last element is the total count; step back
	 once from the end iterator to reach it.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
287
/* Print an unsigned int VAL into UIOUT under the field name FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
295
/* Disassemble a section of the recorded instruction trace.

   Prints the half-open instruction range [BEGIN; END): one line per
   instruction, consisting of its trace index and its disassembly.
   FLAGS is passed through to gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassemble exactly one instruction at INSN->PC.  Disassembly
	 with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
326
/* The to_insn_history method of target record-btrace.

   Prints abs (SIZE) instructions around the current browsing position
   (the range shown by the previous command, the replay position, or
   the end of the trace).  The sign of SIZE gives the direction.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  The btrace_insn_next/prev calls return how many steps
	 they actually covered, which may be less than requested at the
	 trace boundaries.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous insn-history command stopped,
	 in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent command continues from
     here.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
412
/* The to_insn_history_range method of target record-btrace.

   Prints the instructions numbered FROM through TO, both inclusive.
   An upper bound beyond the end of the trace is silently truncated;
   a lower bound outside the trace is an error.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int
     internally; reject ranges that do not fit.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
465
466 /* The to_insn_history_from method of target record-btrace. */
467
468 static void
469 record_btrace_insn_history_from (struct target_ops *self,
470 ULONGEST from, int size, int flags)
471 {
472 ULONGEST begin, end, context;
473
474 context = abs (size);
475 if (context == 0)
476 error (_("Bad record instruction-history-size."));
477
478 if (size < 0)
479 {
480 end = from;
481
482 if (from < context)
483 begin = 0;
484 else
485 begin = from - context + 1;
486 }
487 else
488 {
489 begin = from;
490 end = from + context - 1;
491
492 /* Check for wrap-around. */
493 if (end < begin)
494 end = ULONGEST_MAX;
495 }
496
497 record_btrace_insn_history_range (self, begin, end, flags);
498 }
499
500 /* Print the instruction number range for a function call history line. */
501
502 static void
503 btrace_call_history_insn_range (struct ui_out *uiout,
504 const struct btrace_function *bfun)
505 {
506 unsigned int begin, end, size;
507
508 size = VEC_length (btrace_insn_s, bfun->insn);
509 gdb_assert (size > 0);
510
511 begin = bfun->insn_offset;
512 end = begin + size - 1;
513
514 ui_out_field_uint (uiout, "insn begin", begin);
515 ui_out_text (uiout, ",");
516 ui_out_field_uint (uiout, "insn end", end);
517 }
518
519 /* Print the source line information for a function call history line. */
520
521 static void
522 btrace_call_history_src_line (struct ui_out *uiout,
523 const struct btrace_function *bfun)
524 {
525 struct symbol *sym;
526 int begin, end;
527
528 sym = bfun->sym;
529 if (sym == NULL)
530 return;
531
532 ui_out_field_string (uiout, "file",
533 symtab_to_filename_for_display (sym->symtab));
534
535 begin = bfun->lbegin;
536 end = bfun->lend;
537
538 if (end < begin)
539 return;
540
541 ui_out_text (uiout, ":");
542 ui_out_field_int (uiout, "min line", begin);
543
544 if (end == begin)
545 return;
546
547 ui_out_text (uiout, ",");
548 ui_out_field_int (uiout, "max line", end);
549 }
550
551 /* Get the name of a branch trace function. */
552
553 static const char *
554 btrace_get_bfun_name (const struct btrace_function *bfun)
555 {
556 struct minimal_symbol *msym;
557 struct symbol *sym;
558
559 if (bfun == NULL)
560 return "??";
561
562 msym = bfun->msym;
563 sym = bfun->sym;
564
565 if (sym != NULL)
566 return SYMBOL_PRINT_NAME (sym);
567 else if (msym != NULL)
568 return MSYMBOL_PRINT_NAME (msym);
569 else
570 return "??";
571 }
572
/* Disassemble a section of the recorded function trace.

   Prints the half-open range [BEGIN; END) of function segments, one
   line each: index, optional call-depth indentation, function name,
   and optionally the instruction range and source line information,
   as selected by FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent proportionally to the call depth.  BTINFO->LEVEL
	     presumably offsets BFUN->LEVEL to be non-negative — confirm
	     in btrace.h.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");
	}

      /* Prefer the debug symbol name; fall back to the minimal symbol.
	 In CLI output print "??" when neither is known; MI omits the
	 field instead.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
631
/* The to_call_history method of target record-btrace.

   Prints abs (SIZE) function segments around the current browsing
   position (the range shown by the previous command, the replay
   position, or the end of the trace).  The sign of SIZE gives the
   direction.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple is labeled "insn history" although this is
     the call history; record_btrace_call_history_range uses "func
     history".  Looks like a copy/paste — the label is MI-visible, so
     confirm before changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  /* Build a call iterator from the replay instruction iterator.  */
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous call-history command stopped,
	 in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next command.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
720
/* The to_call_history_range method of target record-btrace.

   Prints the function segments numbered FROM through TO, both
   inclusive.  An upper bound beyond the end of the trace is silently
   truncated; a lower bound outside the trace is an error.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int internally;
     reject ranges that do not fit.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
773
774 /* The to_call_history_from method of target record-btrace. */
775
776 static void
777 record_btrace_call_history_from (struct target_ops *self,
778 ULONGEST from, int size, int flags)
779 {
780 ULONGEST begin, end, context;
781
782 context = abs (size);
783 if (context == 0)
784 error (_("Bad record function-call-history-size."));
785
786 if (size < 0)
787 {
788 end = from;
789
790 if (from < context)
791 begin = 0;
792 else
793 begin = from - context + 1;
794 }
795 else
796 {
797 begin = from;
798 end = from + context - 1;
799
800 /* Check for wrap-around. */
801 if (end < begin)
802 end = ULONGEST_MAX;
803 }
804
805 record_btrace_call_history_range (self, begin, end, flags);
806 }
807
808 /* The to_record_is_replaying method of target record-btrace. */
809
810 static int
811 record_btrace_is_replaying (struct target_ops *self)
812 {
813 struct thread_info *tp;
814
815 ALL_THREADS (tp)
816 if (btrace_is_replaying (tp))
817 return 1;
818
819 return 0;
820 }
821
822 /* The to_xfer_partial method of target record-btrace. */
823
824 static enum target_xfer_status
825 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
826 const char *annex, gdb_byte *readbuf,
827 const gdb_byte *writebuf, ULONGEST offset,
828 ULONGEST len, ULONGEST *xfered_len)
829 {
830 struct target_ops *t;
831
832 /* Filter out requests that don't make sense during replay. */
833 if (replay_memory_access == replay_memory_access_read_only
834 && record_btrace_is_replaying (ops))
835 {
836 switch (object)
837 {
838 case TARGET_OBJECT_MEMORY:
839 {
840 struct target_section *section;
841
842 /* We do not allow writing memory in general. */
843 if (writebuf != NULL)
844 {
845 *xfered_len = len;
846 return TARGET_XFER_UNAVAILABLE;
847 }
848
849 /* We allow reading readonly memory. */
850 section = target_section_by_addr (ops, offset);
851 if (section != NULL)
852 {
853 /* Check if the section we found is readonly. */
854 if ((bfd_get_section_flags (section->the_bfd_section->owner,
855 section->the_bfd_section)
856 & SEC_READONLY) != 0)
857 {
858 /* Truncate the request to fit into this section. */
859 len = min (len, section->endaddr - offset);
860 break;
861 }
862 }
863
864 *xfered_len = len;
865 return TARGET_XFER_UNAVAILABLE;
866 }
867 }
868 }
869
870 /* Forward the request. */
871 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
872 if (ops->to_xfer_partial != NULL)
873 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
874 offset, len, xfered_len);
875
876 *xfered_len = len;
877 return TARGET_XFER_UNAVAILABLE;
878 }
879
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the read-only replay memory restriction so the
   breakpoint can be written, then restores the previous setting — even
   when the target beneath throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access setting before re-throwing any error.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
907
/* The to_remove_breakpoint method of target record-btrace.

   Temporarily lifts the read-only replay memory restriction so the
   breakpoint can be removed, then restores the previous setting — even
   when the target beneath throws.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access setting before re-throwing any error.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
935
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC is available; it is supplied from the
   instruction at the replay position.  Otherwise the request is
   forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      /* Supply the PC of the current replay instruction.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      /* Not replaying: forward to the first target beneath that
	 implements to_fetch_registers.  */
      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	  {
	    t->to_fetch_registers (t, regcache, regno);
	    break;
	  }
    }
}
981
982 /* The to_store_registers method of target record-btrace. */
983
984 static void
985 record_btrace_store_registers (struct target_ops *ops,
986 struct regcache *regcache, int regno)
987 {
988 struct target_ops *t;
989
990 if (record_btrace_is_replaying (ops))
991 error (_("This record target does not allow writing registers."));
992
993 gdb_assert (may_write_registers != 0);
994
995 for (t = ops->beneath; t != NULL; t = t->beneath)
996 if (t->to_store_registers != NULL)
997 {
998 t->to_store_registers (t, regcache, regno);
999 return;
1000 }
1001
1002 noprocess ();
1003 }
1004
1005 /* The to_prepare_to_store method of target record-btrace. */
1006
1007 static void
1008 record_btrace_prepare_to_store (struct target_ops *ops,
1009 struct regcache *regcache)
1010 {
1011 struct target_ops *t;
1012
1013 if (record_btrace_is_replaying (ops))
1014 return;
1015
1016 for (t = ops->beneath; t != NULL; t = t->beneath)
1017 if (t->to_prepare_to_store != NULL)
1018 {
1019 t->to_prepare_to_store (t, regcache);
1020 return;
1021 }
1022 }
1023
/* The branch trace frame cache.

   Associates a frame with the branch trace function segment it was
   built from, so the unwinder can find the segment again for any
   cached frame.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.
   NOTE(review): the hash and equality functions below actually key on
   the FRAME pointer — the "indexed by NEXT" wording looks stale.  */

static htab_t bfcache;
1041
1042 /* hash_f for htab_create_alloc of bfcache. */
1043
1044 static hashval_t
1045 bfcache_hash (const void *arg)
1046 {
1047 const struct btrace_frame_cache *cache = arg;
1048
1049 return htab_hash_pointer (cache->frame);
1050 }
1051
1052 /* eq_f for htab_create_alloc of bfcache. */
1053
1054 static int
1055 bfcache_eq (const void *arg1, const void *arg2)
1056 {
1057 const struct btrace_frame_cache *cache1 = arg1;
1058 const struct btrace_frame_cache *cache2 = arg2;
1059
1060 return cache1->frame == cache2->frame;
1061 }
1062
1063 /* Create a new btrace frame cache. */
1064
1065 static struct btrace_frame_cache *
1066 bfcache_new (struct frame_info *frame)
1067 {
1068 struct btrace_frame_cache *cache;
1069 void **slot;
1070
1071 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1072 cache->frame = frame;
1073
1074 slot = htab_find_slot (bfcache, cache, INSERT);
1075 gdb_assert (*slot == NULL);
1076 *slot = cache;
1077
1078 return cache;
1079 }
1080
1081 /* Extract the branch trace function from a branch trace frame. */
1082
1083 static const struct btrace_function *
1084 btrace_get_frame_function (struct frame_info *frame)
1085 {
1086 const struct btrace_frame_cache *cache;
1087 const struct btrace_function *bfun;
1088 struct btrace_frame_cache pattern;
1089 void **slot;
1090
1091 pattern.frame = frame;
1092
1093 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1094 if (slot == NULL)
1095 return NULL;
1096
1097 cache = *slot;
1098 return cache->bfun;
1099 }
1100
1101 /* Implement stop_reason method for record_btrace_frame_unwind. */
1102
1103 static enum unwind_stop_reason
1104 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1105 void **this_cache)
1106 {
1107 const struct btrace_frame_cache *cache;
1108 const struct btrace_function *bfun;
1109
1110 cache = *this_cache;
1111 bfun = cache->bfun;
1112 gdb_assert (bfun != NULL);
1113
1114 if (bfun->up == NULL)
1115 return UNWIND_UNAVAILABLE;
1116
1117 return UNWIND_NO_REASON;
1118 }
1119
/* Implement this_id method for record_btrace_frame_this_id.

   The stack is not available in the record history, so the id is built
   with an unavailable stack from the frame's function start address
   plus, as special data, the number of the function's first trace
   segment.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function instance yield the same id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1148
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the caller's
   traced function segment.  Anything else is unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is via a return: the unwound PC is the first
	 instruction of the caller's segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link is via a call: the unwound PC is the instruction
	 following the call, i.e. the caller's last traced instruction
	 plus its length.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1197
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims a frame when it belongs to the branch trace: either the
   innermost frame while replaying, or the caller frame of a cached
   btrace frame — excluding tail-call links, which are handled by
   record_btrace_tailcall_frame_sniffer.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Otherwise this is the caller of a traced callee frame.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1247
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims a frame only when the callee's up link was established by a tail
   call.  Returns non-zero and fills in THIS_CACHE on success.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tailcall frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1285
/* Implement the dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  Removes THIS_CACHE from the
   global BFCACHE hash table when the frame is destroyed.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The lookup is only used to assert the cache is actually present;
     the removal below does its own lookup.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1299
/* Btrace recording stores neither previous memory content nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1316
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call (see record_btrace_tailcall_frame_sniffer).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1327
/* Implement the to_get_unwinder method.  Returns the btrace unwinder for
   normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1335
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1343
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace thread info; the actual
   stepping happens later in record_btrace_wait.  Errors out if TP
   already has a pending move request.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1364
1365 /* Find the thread to resume given a PTID. */
1366
1367 static struct thread_info *
1368 record_btrace_find_resume_thread (ptid_t ptid)
1369 {
1370 struct thread_info *tp;
1371
1372 /* When asked to resume everything, we pick the current thread. */
1373 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1374 ptid = inferior_ptid;
1375
1376 return find_thread_ptid (ptid);
1377 }
1378
/* Start replaying a thread.  Returns the new replay iterator (also
   stored in TP's btrace thread info), or NULL if TP has no branch
   trace.  Rethrows any error after cleaning up the partial state.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the partially-installed replay state before
     rethrowing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1465
1466 /* Stop replaying a thread. */
1467
1468 static void
1469 record_btrace_stop_replaying (struct thread_info *tp)
1470 {
1471 struct btrace_thread_info *btinfo;
1472
1473 btinfo = &tp->btrace;
1474
1475 xfree (btinfo->replay);
1476 btinfo->replay = NULL;
1477
1478 /* Make sure we're not leaving any stale registers. */
1479 registers_changed_ptid (tp->ptid);
1480 }
1481
1482 /* The to_resume method of target record-btrace. */
1483
1484 static void
1485 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1486 enum gdb_signal signal)
1487 {
1488 struct thread_info *tp, *other;
1489 enum btrace_thread_flag flag;
1490
1491 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1492
1493 tp = record_btrace_find_resume_thread (ptid);
1494 if (tp == NULL)
1495 error (_("Cannot find thread to resume."));
1496
1497 /* Stop replaying other threads if the thread to resume is not replaying. */
1498 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1499 ALL_THREADS (other)
1500 record_btrace_stop_replaying (other);
1501
1502 /* As long as we're not replaying, just forward the request. */
1503 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1504 {
1505 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1506 if (ops->to_resume != NULL)
1507 return ops->to_resume (ops, ptid, step, signal);
1508
1509 error (_("Cannot find target for stepping."));
1510 }
1511
1512 /* Compute the btrace thread flag for the requested move. */
1513 if (step == 0)
1514 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1515 else
1516 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1517
1518 /* At the moment, we only move a single thread. We could also move
1519 all threads in parallel by single-stepping each resumed thread
1520 until the first runs into an event.
1521 When we do that, we would want to continue all other threads.
1522 For now, just resume one thread to not confuse to_wait. */
1523 record_btrace_resume_thread (tp, flag);
1524
1525 /* We just indicate the resume intent here. The actual stepping happens in
1526 record_btrace_wait below. */
1527 }
1528
1529 /* Find a thread to move. */
1530
1531 static struct thread_info *
1532 record_btrace_find_thread_to_move (ptid_t ptid)
1533 {
1534 struct thread_info *tp;
1535
1536 /* First check the parameter thread. */
1537 tp = find_thread_ptid (ptid);
1538 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1539 return tp;
1540
1541 /* Otherwise, find one other thread that has been resumed. */
1542 ALL_THREADS (tp)
1543 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1544 return tp;
1545
1546 return NULL;
1547 }
1548
1549 /* Return a target_waitstatus indicating that we ran out of history. */
1550
1551 static struct target_waitstatus
1552 btrace_step_no_history (void)
1553 {
1554 struct target_waitstatus status;
1555
1556 status.kind = TARGET_WAITKIND_NO_HISTORY;
1557
1558 return status;
1559 }
1560
1561 /* Return a target_waitstatus indicating that a step finished. */
1562
1563 static struct target_waitstatus
1564 btrace_step_stopped (void)
1565 {
1566 struct target_waitstatus status;
1567
1568 status.kind = TARGET_WAITKIND_STOPPED;
1569 status.value.sig = GDB_SIGNAL_TRAP;
1570
1571 return status;
1572 }
1573
1574 /* Clear the record histories. */
1575
1576 static void
1577 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1578 {
1579 xfree (btinfo->insn_history);
1580 xfree (btinfo->call_history);
1581
1582 btinfo->insn_history = NULL;
1583 btinfo->call_history = NULL;
1584 }
1585
/* Step a single thread according to its pending move request (BTHR_STEP,
   BTHR_RSTEP, BTHR_CONT or BTHR_RCONT).  Consumes the request and
   returns the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so it is only acted upon once.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* The address space is needed for breakpoint checks below.  */
      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1711
/* The to_wait method of target record-btrace.

   When not replaying (and not executing in reverse), forwards the
   request to the target beneath.  Otherwise performs the stepping
   requested earlier via record_btrace_resume.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* Nothing to do; report that no event is available.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads. */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1758
/* The to_can_execute_reverse method of target record-btrace.  Reverse
   execution is always possible within the recorded history.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1766
/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  /* Otherwise delegate to the target beneath.  */
  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}
1780
1781 /* The to_find_new_threads method of target record-btrace. */
1782
1783 static void
1784 record_btrace_find_new_threads (struct target_ops *ops)
1785 {
1786 /* Don't expect new threads if we're replaying. */
1787 if (record_btrace_is_replaying (ops))
1788 return;
1789
1790 /* Forward the request. */
1791 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1792 if (ops->to_find_new_threads != NULL)
1793 {
1794 ops->to_find_new_threads (ops);
1795 break;
1796 }
1797 }
1798
/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, ptid);

  /* No target beneath implements it; assume the thread is gone.  */
  return 0;
}
1815
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Clears the record histories in either case so browsing
   starts anew from the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Start replaying first if necessary; that allocates
	 btinfo->replay, which is then overwritten with IT below.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position moved; discard cached registers.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1843
/* The to_goto_record_begin method of target record-btrace.  Moves the
   replay position to the start of the recorded history.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  /* Show the user where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1859
/* The to_goto_record_end method of target record-btrace.  Stops
   replaying, i.e. returns to the current position of the live target.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator means "stop replaying".  */
  record_btrace_set_replay (tp, NULL);

  /* Show the user where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1873
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: NUMBER is a truncation of INSN, so they
     differ exactly when INSN doesn't fit into an unsigned int.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  /* Show the user where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1900
1901 /* Initialize the record-btrace target ops. */
1902
1903 static void
1904 init_record_btrace_ops (void)
1905 {
1906 struct target_ops *ops;
1907
1908 ops = &record_btrace_ops;
1909 ops->to_shortname = "record-btrace";
1910 ops->to_longname = "Branch tracing target";
1911 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1912 ops->to_open = record_btrace_open;
1913 ops->to_close = record_btrace_close;
1914 ops->to_detach = record_detach;
1915 ops->to_disconnect = record_disconnect;
1916 ops->to_mourn_inferior = record_mourn_inferior;
1917 ops->to_kill = record_kill;
1918 ops->to_stop_recording = record_btrace_stop_recording;
1919 ops->to_info_record = record_btrace_info;
1920 ops->to_insn_history = record_btrace_insn_history;
1921 ops->to_insn_history_from = record_btrace_insn_history_from;
1922 ops->to_insn_history_range = record_btrace_insn_history_range;
1923 ops->to_call_history = record_btrace_call_history;
1924 ops->to_call_history_from = record_btrace_call_history_from;
1925 ops->to_call_history_range = record_btrace_call_history_range;
1926 ops->to_record_is_replaying = record_btrace_is_replaying;
1927 ops->to_xfer_partial = record_btrace_xfer_partial;
1928 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1929 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1930 ops->to_fetch_registers = record_btrace_fetch_registers;
1931 ops->to_store_registers = record_btrace_store_registers;
1932 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1933 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1934 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1935 ops->to_resume = record_btrace_resume;
1936 ops->to_wait = record_btrace_wait;
1937 ops->to_find_new_threads = record_btrace_find_new_threads;
1938 ops->to_thread_alive = record_btrace_thread_alive;
1939 ops->to_goto_record_begin = record_btrace_goto_begin;
1940 ops->to_goto_record_end = record_btrace_goto_end;
1941 ops->to_goto_record = record_btrace_goto;
1942 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1943 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1944 ops->to_stratum = record_stratum;
1945 ops->to_magic = OPS_MAGIC;
1946 }
1947
1948 /* Alias for "target record". */
1949
1950 static void
1951 cmd_record_btrace_start (char *args, int from_tty)
1952 {
1953 if (args != NULL && *args != 0)
1954 error (_("Invalid argument."));
1955
1956 execute_command ("target record-btrace", from_tty);
1957 }
1958
/* The "set record btrace" command.  Without a sub-command, list the
   available options.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
1966
/* The "show record btrace" command.  Without a sub-command, show all
   option values.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
1974
1975 /* The "show record btrace replay-memory-access" command. */
1976
1977 static void
1978 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
1979 struct cmd_list_element *c, const char *value)
1980 {
1981 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
1982 replay_memory_access);
1983 }
1984
/* Forward declaration so the definition below has a visible prototype.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  Registers the "record btrace" command,
   its "b" alias, the set/show prefix commands and the
   replay-memory-access option, registers the target, and creates the
   frame cache hash table.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			_("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache maps frames to their btrace function segments.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.102972 seconds and 4 git commands to generate.