Add target_ops argument to to_insn_history_from
gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Temporarily allow memory accesses.  */
static int record_btrace_allow_memory_access;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
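
/* Example (illustrative): with "set debug record 1" in effect, DEBUG ("open")
   prints "[record-btrace] open" to gdb_stdlog, and DEBUG ("resume %s", "all")
   prints "[record-btrace] resume all".  The do ... while (0) wrapper keeps
   "if (x) DEBUG (...); else ..." from binding the else to the macro's
   internal if.  */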

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}
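
/* Illustrative usage (hypothetical session): "record btrace" (an alias for
   "target record-btrace"; see cmd_record_btrace_start below) enables tracing
   for the matching threads and pushes this target onto the target stack.  */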

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
                       "%d (%s).\n"), insns, calls, tp->num,
                     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}
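
/* Sample "info record" output (hypothetical values):

     Recorded 4262 instructions in 213 functions for thread 1 (process 1234).
     Replay in progress.  At instruction 97.

   The second line is only printed while replaying.  */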

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
         See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
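
/* Worked example of the fill-up logic above (hypothetical numbers): for
   "record instruction-history -" with a context of 10 while replaying at
   instruction 4, btrace_insn_next (&end, 1) first covers the current
   instruction, btrace_insn_prev (&begin, 9) only reaches back to
   instruction 1, and the final btrace_insn_next then extends END forward
   so that, trace permitting, 10 instructions are covered in total.  */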

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
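
/* Two illustrative requests for the checks above: FROM and TO arrive as
   ULONGEST but are iterated as unsigned int, so a FROM of 0x100000000
   would truncate LOW to 0 and fail the "low != from" wrap-around test,
   while a request for instructions 5 to 10 passes both checks and END is
   advanced once so the half-open range [begin; end) still includes
   instruction 10.  */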

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (begin, end, flags);
}
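
/* Worked example (hypothetical numbers): FROM = 100 with SIZE = -10 yields
   the inclusive range [91, 100], SIZE = 10 yields [100, 109], and FROM = 4
   with SIZE = -10 clamps BEGIN to 0 instead of underflowing.  */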

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return SYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}
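
/* A resulting line might look like this (hypothetical values):

     13      foo     inst 42,47      at foo.c:10,12

   i.e. the function index, optional call-depth indentation, the function
   name, the covered instruction number range, and the source line range.  */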

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_E_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_E_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_xfer_partial != NULL)
      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                                   offset, len, xfered_len);

  *xfered_len = len;
  return TARGET_XFER_E_UNAVAILABLE;
}
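
/* Net effect (illustrative): while replaying, reading from a SEC_READONLY
   section such as .text succeeds (so disassembling at the replay PC works),
   whereas any write, or a read from a stack or heap address, reports the
   memory as unavailable.  Breakpoint insertion and removal still work
   because the methods below temporarily set
   record_btrace_allow_memory_access.  */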

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
        if (t->to_fetch_registers != NULL)
          {
            t->to_fetch_registers (t, regcache, regno);
            break;
          }
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
      {
        t->to_store_registers (t, regcache, regno);
        return;
      }

  noprocess ();
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    return;

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
      {
        t->to_prepare_to_store (t, regcache);
        return;
      }
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
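
/* Lifecycle sketch: an entry is created by the frame sniffers below via
   bfcache_new, looked up by frame in btrace_get_frame_function, and removed
   in record_btrace_frame_dealloc_cache.  The entry itself lives on the
   frame obstack, so only the hash table slot needs explicit cleanup.  */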

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement the dealloc_cache method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
        if (ops->to_resume != NULL)
          return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
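
  /* In tabular form (illustrative):

       step  direction  flag
        0    forward    BTHR_CONT
        0    reverse    BTHR_RCONT
        1    forward    BTHR_STEP
        1    reverse    BTHR_RSTEP  */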

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread so as not to confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
        record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
        {
          const struct btrace_insn *insn;

          /* We are always able to step at least once.  */
          steps = btrace_insn_next (replay, 1);
          gdb_assert (steps == 1);

          /* We stop replaying if we reached the end of the trace.  */
          if (btrace_insn_cmp (replay, &end) == 0)
            {
              record_btrace_stop_replaying (tp);
              return btrace_step_no_history ();
            }

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
        {
          const struct btrace_insn *insn;

          /* If we can't step any further, we're done.  */
          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            return btrace_step_no_history ();

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }
    }
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
        if (ops->to_wait != NULL)
          return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
                                   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
}

/* The to_find_new_threads method of target record-btrace.  */

static void
record_btrace_find_new_threads (struct target_ops *ops)
{
  /* Don't expect new threads if we're replaying.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_find_new_threads != NULL)
      {
        ops->to_find_new_threads (ops);
        break;
      }
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, ptid);

  return 0;
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
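
/* Illustrative usage (hypothetical session): "record goto begin",
   "record goto end", and "record goto 23" map onto the three methods above,
   moving the replay position to the start of the trace, back to the end of
   the trace (stopping replay), or to recorded instruction 23,
   respectively.  */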

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_frame_unwind;
  ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Alias for "target record-btrace".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
           _("Start branch trace recording."),
           &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);
}