1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
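
/* A usage note: this value is toggled via the "set record btrace
   replay-memory-access" command.  With the default "read-only",
   record_btrace_xfer_partial below rejects memory writes and reads of
   anything but read-only sections while replaying.  */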
60
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
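
/* For example, with record debugging enabled ("set debug record 1"),

     DEBUG ("resume %s", target_pid_to_str (ptid));

   prints "[record-btrace] resume <ptid>" to gdb_stdlog.  */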
99
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp;
164
165 tp = arg;
166
167 btrace_disable (tp);
168 }
169
170 /* Enable automatic tracing of new threads. */
171
172 static void
173 record_btrace_auto_enable (void)
174 {
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179 }
180
181 /* Disable automatic tracing of new threads. */
182
183 static void
184 record_btrace_auto_disable (void)
185 {
186 /* The observer may already have been detached. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194 }
195
196 /* The record-btrace async event handler function. */
197
198 static void
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
200 {
201 inferior_event_handler (INF_REG_EVENT, NULL);
202 }
203
204 /* The to_open method of target record-btrace. */
205
206 static void
207 record_btrace_open (const char *args, int from_tty)
208 {
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
211
212 DEBUG ("open");
213
214 record_preopen ();
215
216 if (!target_has_execution)
217 error (_("The program is not being run."));
218
219 gdb_assert (record_btrace_thread_observer == NULL);
220
221 disable_chain = make_cleanup (null_cleanup, NULL);
222 ALL_NON_EXITED_THREADS (tp)
223 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
224 {
225 btrace_enable (tp, &record_btrace_conf);
226
227 make_cleanup (record_btrace_disable_callback, tp);
228 }
229
230 record_btrace_auto_enable ();
231
232 push_target (&record_btrace_ops);
233
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event,
236 NULL);
237 record_btrace_generating_corefile = 0;
238
239 observer_notify_record_changed (current_inferior (), 1);
240
241 discard_cleanups (disable_chain);
242 }
243
244 /* The to_stop_recording method of target record-btrace. */
245
246 static void
247 record_btrace_stop_recording (struct target_ops *self)
248 {
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
255 ALL_NON_EXITED_THREADS (tp)
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258 }
259
260 /* The to_close method of target record-btrace. */
261
262 static void
263 record_btrace_close (struct target_ops *self)
264 {
265 struct thread_info *tp;
266
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
269
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
273
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp)
277 btrace_teardown (tp);
278 }
279
280 /* The to_async method of target record-btrace. */
281
282 static void
283 record_btrace_async (struct target_ops *ops, int enable)
284 {
285 if (enable)
286 mark_async_event_handler (record_btrace_async_inferior_event_handler);
287 else
288 clear_async_event_handler (record_btrace_async_inferior_event_handler);
289
290 ops->beneath->to_async (ops->beneath, enable);
291 }
292
293 /* Adjust *SIZE and return a human-readable size suffix. */
294
295 static const char *
296 record_btrace_adjust_size (unsigned int *size)
297 {
298 unsigned int sz;
299
300 sz = *size;
301
302 if ((sz & ((1u << 30) - 1)) == 0)
303 {
304 *size = sz >> 30;
305 return "GB";
306 }
307 else if ((sz & ((1u << 20) - 1)) == 0)
308 {
309 *size = sz >> 20;
310 return "MB";
311 }
312 else if ((sz & ((1u << 10) - 1)) == 0)
313 {
314 *size = sz >> 10;
315 return "kB";
316 }
317 else
318 return "";
319 }
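
/* For example, the helper above folds an exact multiple such as 16384
   into 16 with suffix "kB", so the configuration printers below report
   "Buffer size: 16kB.".  A size that is not an exact multiple of 1kB,
   such as 16385, is left unchanged with an empty suffix.  */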
320
321 /* Print a BTS configuration. */
322
323 static void
324 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325 {
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335 }
336
337 /* Print an Intel(R) Processor Trace configuration. */
338
339 static void
340 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
341 {
342 const char *suffix;
343 unsigned int size;
344
345 size = conf->size;
346 if (size > 0)
347 {
348 suffix = record_btrace_adjust_size (&size);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
350 }
351 }
352
353 /* Print a branch tracing configuration. */
354
355 static void
356 record_btrace_print_conf (const struct btrace_config *conf)
357 {
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
376 }
377
378 /* The to_info_record method of target record-btrace. */
379
380 static void
381 record_btrace_info (struct target_ops *self)
382 {
383 struct btrace_thread_info *btinfo;
384 const struct btrace_config *conf;
385 struct thread_info *tp;
386 unsigned int insns, calls, gaps;
387
388 DEBUG ("info");
389
390 tp = find_thread_ptid (inferior_ptid);
391 if (tp == NULL)
392 error (_("No thread."));
393
394 btinfo = &tp->btrace;
395
396 conf = btrace_conf (btinfo);
397 if (conf != NULL)
398 record_btrace_print_conf (conf);
399
400 btrace_fetch (tp);
401
402 insns = 0;
403 calls = 0;
404 gaps = 0;
405
406 if (!btrace_is_empty (tp))
407 {
408 struct btrace_call_iterator call;
409 struct btrace_insn_iterator insn;
410
411 btrace_call_end (&call, btinfo);
412 btrace_call_prev (&call, 1);
413 calls = btrace_call_number (&call);
414
415 btrace_insn_end (&insn, btinfo);
416
417 insns = btrace_insn_number (&insn);
418 if (insns != 0)
419 {
420 /* The last instruction does not really belong to the trace. */
421 insns -= 1;
422 }
423 else
424 {
425 unsigned int steps;
426
427 /* Skip gaps at the end. */
428 do
429 {
430 steps = btrace_insn_prev (&insn, 1);
431 if (steps == 0)
432 break;
433
434 insns = btrace_insn_number (&insn);
435 }
436 while (insns == 0);
437 }
438
439 gaps = btinfo->ngaps;
440 }
441
442 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
443 "for thread %d (%s).\n"), insns, calls, gaps,
444 tp->num, target_pid_to_str (tp->ptid));
445
446 if (btrace_is_replaying (tp))
447 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
448 btrace_insn_number (btinfo->replay));
449 }
450
451 /* Print a decode error. */
452
453 static void
454 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
455 enum btrace_format format)
456 {
457 const char *errstr;
458 int is_error;
459
460 errstr = _("unknown");
461 is_error = 1;
462
463 switch (format)
464 {
465 default:
466 break;
467
468 case BTRACE_FORMAT_BTS:
469 switch (errcode)
470 {
471 default:
472 break;
473
474 case BDE_BTS_OVERFLOW:
475 errstr = _("instruction overflow");
476 break;
477
478 case BDE_BTS_INSN_SIZE:
479 errstr = _("unknown instruction");
480 break;
481 }
482 break;
483
484 #if defined (HAVE_LIBIPT)
485 case BTRACE_FORMAT_PT:
486 switch (errcode)
487 {
488 case BDE_PT_USER_QUIT:
489 is_error = 0;
490 errstr = _("trace decode cancelled");
491 break;
492
493 case BDE_PT_DISABLED:
494 is_error = 0;
495 errstr = _("disabled");
496 break;
497
498 case BDE_PT_OVERFLOW:
499 is_error = 0;
500 errstr = _("overflow");
501 break;
502
503 default:
504 if (errcode < 0)
505 errstr = pt_errstr (pt_errcode (errcode));
506 break;
507 }
508 break;
509 #endif /* defined (HAVE_LIBIPT) */
510 }
511
512 ui_out_text (uiout, _("["));
513 if (is_error)
514 {
515 ui_out_text (uiout, _("decode error ("));
516 ui_out_field_int (uiout, "errcode", errcode);
517 ui_out_text (uiout, _("): "));
518 }
519 ui_out_text (uiout, errstr);
520 ui_out_text (uiout, _("]\n"));
521 }
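
/* For example, for a BTS buffer overflow the function above prints

     [decode error (<errcode>): instruction overflow]

   whereas a non-error PT event such as BDE_PT_DISABLED prints just
   "[disabled]".  */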
522
523 /* Print an unsigned int. */
524
525 static void
526 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
527 {
528 ui_out_field_fmt (uiout, fld, "%u", val);
529 }
530
531 /* Disassemble a section of the recorded instruction trace. */
532
533 static void
534 btrace_insn_history (struct ui_out *uiout,
535 const struct btrace_thread_info *btinfo,
536 const struct btrace_insn_iterator *begin,
537 const struct btrace_insn_iterator *end, int flags)
538 {
539 struct gdbarch *gdbarch;
540 struct btrace_insn_iterator it;
541
542 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
543 btrace_insn_number (end));
544
545 gdbarch = target_gdbarch ();
546
547 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
548 {
549 const struct btrace_insn *insn;
550
551 insn = btrace_insn_get (&it);
552
553 /* A NULL instruction indicates a gap in the trace. */
554 if (insn == NULL)
555 {
556 const struct btrace_config *conf;
557
558 conf = btrace_conf (btinfo);
559
560 /* We have trace so we must have a configuration. */
561 gdb_assert (conf != NULL);
562
563 btrace_ui_out_decode_error (uiout, it.function->errcode,
564 conf->format);
565 }
566 else
567 {
568 char prefix[4];
569
570 /* We may add a speculation prefix later. We use the same space
571 that is used for the pc prefix. */
572 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
573 strncpy (prefix, pc_prefix (insn->pc), 3);
574 else
575 {
576 prefix[0] = ' ';
577 prefix[1] = ' ';
578 prefix[2] = ' ';
579 }
580 prefix[3] = 0;
581
582 /* Print the instruction index. */
583 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
584 ui_out_text (uiout, "\t");
585
586 /* Indicate speculative execution by a leading '?'. */
587 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
588 prefix[0] = '?';
589
590 /* Print the prefix; we tell gdb_disassembly below to omit it. */
591 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
592
593 /* Disassembly with '/m' flag may not produce the expected result.
594 See PR gdb/11833. */
595 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
596 1, insn->pc, insn->pc + 1);
597 }
598 }
599 }
600
601 /* The to_insn_history method of target record-btrace. */
602
603 static void
604 record_btrace_insn_history (struct target_ops *self, int size, int flags)
605 {
606 struct btrace_thread_info *btinfo;
607 struct btrace_insn_history *history;
608 struct btrace_insn_iterator begin, end;
609 struct cleanup *uiout_cleanup;
610 struct ui_out *uiout;
611 unsigned int context, covered;
612
613 uiout = current_uiout;
614 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
615 "insn history");
616 context = abs (size);
617 if (context == 0)
618 error (_("Bad record instruction-history-size."));
619
620 btinfo = require_btrace ();
621 history = btinfo->insn_history;
622 if (history == NULL)
623 {
624 struct btrace_insn_iterator *replay;
625
626 DEBUG ("insn-history (0x%x): %d", flags, size);
627
628 /* If we're replaying, we start at the replay position. Otherwise, we
629 start at the tail of the trace. */
630 replay = btinfo->replay;
631 if (replay != NULL)
632 begin = *replay;
633 else
634 btrace_insn_end (&begin, btinfo);
635
636 /* We start from here and expand in the requested direction. Then we
637 expand in the other direction, as well, to fill up any remaining
638 context. */
639 end = begin;
640 if (size < 0)
641 {
642 /* We want the current position covered, as well. */
643 covered = btrace_insn_next (&end, 1);
644 covered += btrace_insn_prev (&begin, context - covered);
645 covered += btrace_insn_next (&end, context - covered);
646 }
647 else
648 {
649 covered = btrace_insn_next (&end, context);
650 covered += btrace_insn_prev (&begin, context - covered);
651 }
652 }
653 else
654 {
655 begin = history->begin;
656 end = history->end;
657
658 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
659 btrace_insn_number (&begin), btrace_insn_number (&end));
660
661 if (size < 0)
662 {
663 end = begin;
664 covered = btrace_insn_prev (&begin, context);
665 }
666 else
667 {
668 begin = end;
669 covered = btrace_insn_next (&end, context);
670 }
671 }
672
673 if (covered > 0)
674 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
675 else
676 {
677 if (size < 0)
678 printf_unfiltered (_("At the start of the branch trace record.\n"));
679 else
680 printf_unfiltered (_("At the end of the branch trace record.\n"));
681 }
682
683 btrace_set_insn_history (btinfo, &begin, &end);
684 do_cleanups (uiout_cleanup);
685 }
686
687 /* The to_insn_history_range method of target record-btrace. */
688
689 static void
690 record_btrace_insn_history_range (struct target_ops *self,
691 ULONGEST from, ULONGEST to, int flags)
692 {
693 struct btrace_thread_info *btinfo;
694 struct btrace_insn_history *history;
695 struct btrace_insn_iterator begin, end;
696 struct cleanup *uiout_cleanup;
697 struct ui_out *uiout;
698 unsigned int low, high;
699 int found;
700
701 uiout = current_uiout;
702 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
703 "insn history");
704 low = from;
705 high = to;
706
707 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
708
709 /* Check for wrap-arounds. */
710 if (low != from || high != to)
711 error (_("Bad range."));
712
713 if (high < low)
714 error (_("Bad range."));
715
716 btinfo = require_btrace ();
717
718 found = btrace_find_insn_by_number (&begin, btinfo, low);
719 if (found == 0)
720 error (_("Range out of bounds."));
721
722 found = btrace_find_insn_by_number (&end, btinfo, high);
723 if (found == 0)
724 {
725 /* Silently truncate the range. */
726 btrace_insn_end (&end, btinfo);
727 }
728 else
729 {
730 /* We want both begin and end to be inclusive. */
731 btrace_insn_next (&end, 1);
732 }
733
734 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
735 btrace_set_insn_history (btinfo, &begin, &end);
736
737 do_cleanups (uiout_cleanup);
738 }
739
740 /* The to_insn_history_from method of target record-btrace. */
741
742 static void
743 record_btrace_insn_history_from (struct target_ops *self,
744 ULONGEST from, int size, int flags)
745 {
746 ULONGEST begin, end, context;
747
748 context = abs (size);
749 if (context == 0)
750 error (_("Bad record instruction-history-size."));
751
752 if (size < 0)
753 {
754 end = from;
755
756 if (from < context)
757 begin = 0;
758 else
759 begin = from - context + 1;
760 }
761 else
762 {
763 begin = from;
764 end = from + context - 1;
765
766 /* Check for wrap-around. */
767 if (end < begin)
768 end = ULONGEST_MAX;
769 }
770
771 record_btrace_insn_history_range (self, begin, end, flags);
772 }
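
/* For example, FROM = 10 with SIZE = -5 in the function above yields the
   range [6; 10]; record_btrace_insn_history_range treats both bounds as
   inclusive, so five instructions ending at instruction 10 are shown.  */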
773
774 /* Print the instruction number range for a function call history line. */
775
776 static void
777 btrace_call_history_insn_range (struct ui_out *uiout,
778 const struct btrace_function *bfun)
779 {
780 unsigned int begin, end, size;
781
782 size = VEC_length (btrace_insn_s, bfun->insn);
783 gdb_assert (size > 0);
784
785 begin = bfun->insn_offset;
786 end = begin + size - 1;
787
788 ui_out_field_uint (uiout, "insn begin", begin);
789 ui_out_text (uiout, ",");
790 ui_out_field_uint (uiout, "insn end", end);
791 }
792
793 /* Compute the lowest and highest source line for the instructions in BFUN
794 and return them in PBEGIN and PEND.
795 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
796 result from inlining or macro expansion. */
797
798 static void
799 btrace_compute_src_line_range (const struct btrace_function *bfun,
800 int *pbegin, int *pend)
801 {
802 struct btrace_insn *insn;
803 struct symtab *symtab;
804 struct symbol *sym;
805 unsigned int idx;
806 int begin, end;
807
808 begin = INT_MAX;
809 end = INT_MIN;
810
811 sym = bfun->sym;
812 if (sym == NULL)
813 goto out;
814
815 symtab = symbol_symtab (sym);
816
817 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
818 {
819 struct symtab_and_line sal;
820
821 sal = find_pc_line (insn->pc, 0);
822 if (sal.symtab != symtab || sal.line == 0)
823 continue;
824
825 begin = min (begin, sal.line);
826 end = max (end, sal.line);
827 }
828
829 out:
830 *pbegin = begin;
831 *pend = end;
832 }
833
834 /* Print the source line information for a function call history line. */
835
836 static void
837 btrace_call_history_src_line (struct ui_out *uiout,
838 const struct btrace_function *bfun)
839 {
840 struct symbol *sym;
841 int begin, end;
842
843 sym = bfun->sym;
844 if (sym == NULL)
845 return;
846
847 ui_out_field_string (uiout, "file",
848 symtab_to_filename_for_display (symbol_symtab (sym)));
849
850 btrace_compute_src_line_range (bfun, &begin, &end);
851 if (end < begin)
852 return;
853
854 ui_out_text (uiout, ":");
855 ui_out_field_int (uiout, "min line", begin);
856
857 if (end == begin)
858 return;
859
860 ui_out_text (uiout, ",");
861 ui_out_field_int (uiout, "max line", end);
862 }
863
864 /* Get the name of a branch trace function. */
865
866 static const char *
867 btrace_get_bfun_name (const struct btrace_function *bfun)
868 {
869 struct minimal_symbol *msym;
870 struct symbol *sym;
871
872 if (bfun == NULL)
873 return "??";
874
875 msym = bfun->msym;
876 sym = bfun->sym;
877
878 if (sym != NULL)
879 return SYMBOL_PRINT_NAME (sym);
880 else if (msym != NULL)
881 return MSYMBOL_PRINT_NAME (msym);
882 else
883 return "??";
884 }
885
886 /* Disassemble a section of the recorded function trace. */
887
888 static void
889 btrace_call_history (struct ui_out *uiout,
890 const struct btrace_thread_info *btinfo,
891 const struct btrace_call_iterator *begin,
892 const struct btrace_call_iterator *end,
893 enum record_print_flag flags)
894 {
895 struct btrace_call_iterator it;
896
897 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
898 btrace_call_number (end));
899
900 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
901 {
902 const struct btrace_function *bfun;
903 struct minimal_symbol *msym;
904 struct symbol *sym;
905
906 bfun = btrace_call_get (&it);
907 sym = bfun->sym;
908 msym = bfun->msym;
909
910 /* Print the function index. */
911 ui_out_field_uint (uiout, "index", bfun->number);
912 ui_out_text (uiout, "\t");
913
914 /* Indicate gaps in the trace. */
915 if (bfun->errcode != 0)
916 {
917 const struct btrace_config *conf;
918
919 conf = btrace_conf (btinfo);
920
921 /* We have trace so we must have a configuration. */
922 gdb_assert (conf != NULL);
923
924 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
925
926 continue;
927 }
928
929 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
930 {
931 int level = bfun->level + btinfo->level, i;
932
933 for (i = 0; i < level; ++i)
934 ui_out_text (uiout, " ");
935 }
936
937 if (sym != NULL)
938 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
939 else if (msym != NULL)
940 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
941 else if (!ui_out_is_mi_like_p (uiout))
942 ui_out_field_string (uiout, "function", "??");
943
944 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
945 {
946 ui_out_text (uiout, _("\tinst "));
947 btrace_call_history_insn_range (uiout, bfun);
948 }
949
950 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
951 {
952 ui_out_text (uiout, _("\tat "));
953 btrace_call_history_src_line (uiout, bfun);
954 }
955
956 ui_out_text (uiout, "\n");
957 }
958 }
959
960 /* The to_call_history method of target record-btrace. */
961
962 static void
963 record_btrace_call_history (struct target_ops *self, int size, int flags)
964 {
965 struct btrace_thread_info *btinfo;
966 struct btrace_call_history *history;
967 struct btrace_call_iterator begin, end;
968 struct cleanup *uiout_cleanup;
969 struct ui_out *uiout;
970 unsigned int context, covered;
971
972 uiout = current_uiout;
973 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
974 "insn history");
975 context = abs (size);
976 if (context == 0)
977 error (_("Bad record function-call-history-size."));
978
979 btinfo = require_btrace ();
980 history = btinfo->call_history;
981 if (history == NULL)
982 {
983 struct btrace_insn_iterator *replay;
984
985 DEBUG ("call-history (0x%x): %d", flags, size);
986
987 /* If we're replaying, we start at the replay position. Otherwise, we
988 start at the tail of the trace. */
989 replay = btinfo->replay;
990 if (replay != NULL)
991 {
992 begin.function = replay->function;
993 begin.btinfo = btinfo;
994 }
995 else
996 btrace_call_end (&begin, btinfo);
997
998 /* We start from here and expand in the requested direction. Then we
999 expand in the other direction, as well, to fill up any remaining
1000 context. */
1001 end = begin;
1002 if (size < 0)
1003 {
1004 /* We want the current position covered, as well. */
1005 covered = btrace_call_next (&end, 1);
1006 covered += btrace_call_prev (&begin, context - covered);
1007 covered += btrace_call_next (&end, context - covered);
1008 }
1009 else
1010 {
1011 covered = btrace_call_next (&end, context);
1012 covered += btrace_call_prev (&begin, context - covered);
1013 }
1014 }
1015 else
1016 {
1017 begin = history->begin;
1018 end = history->end;
1019
1020 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1021 btrace_call_number (&begin), btrace_call_number (&end));
1022
1023 if (size < 0)
1024 {
1025 end = begin;
1026 covered = btrace_call_prev (&begin, context);
1027 }
1028 else
1029 {
1030 begin = end;
1031 covered = btrace_call_next (&end, context);
1032 }
1033 }
1034
1035 if (covered > 0)
1036 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1037 else
1038 {
1039 if (size < 0)
1040 printf_unfiltered (_("At the start of the branch trace record.\n"));
1041 else
1042 printf_unfiltered (_("At the end of the branch trace record.\n"));
1043 }
1044
1045 btrace_set_call_history (btinfo, &begin, &end);
1046 do_cleanups (uiout_cleanup);
1047 }
1048
1049 /* The to_call_history_range method of target record-btrace. */
1050
1051 static void
1052 record_btrace_call_history_range (struct target_ops *self,
1053 ULONGEST from, ULONGEST to, int flags)
1054 {
1055 struct btrace_thread_info *btinfo;
1056 struct btrace_call_history *history;
1057 struct btrace_call_iterator begin, end;
1058 struct cleanup *uiout_cleanup;
1059 struct ui_out *uiout;
1060 unsigned int low, high;
1061 int found;
1062
1063 uiout = current_uiout;
1064 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1065 "func history");
1066 low = from;
1067 high = to;
1068
1069 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1070
1071 /* Check for wrap-arounds. */
1072 if (low != from || high != to)
1073 error (_("Bad range."));
1074
1075 if (high < low)
1076 error (_("Bad range."));
1077
1078 btinfo = require_btrace ();
1079
1080 found = btrace_find_call_by_number (&begin, btinfo, low);
1081 if (found == 0)
1082 error (_("Range out of bounds."));
1083
1084 found = btrace_find_call_by_number (&end, btinfo, high);
1085 if (found == 0)
1086 {
1087 /* Silently truncate the range. */
1088 btrace_call_end (&end, btinfo);
1089 }
1090 else
1091 {
1092 /* We want both begin and end to be inclusive. */
1093 btrace_call_next (&end, 1);
1094 }
1095
1096 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1097 btrace_set_call_history (btinfo, &begin, &end);
1098
1099 do_cleanups (uiout_cleanup);
1100 }
1101
1102 /* The to_call_history_from method of target record-btrace. */
1103
1104 static void
1105 record_btrace_call_history_from (struct target_ops *self,
1106 ULONGEST from, int size, int flags)
1107 {
1108 ULONGEST begin, end, context;
1109
1110 context = abs (size);
1111 if (context == 0)
1112 error (_("Bad record function-call-history-size."));
1113
1114 if (size < 0)
1115 {
1116 end = from;
1117
1118 if (from < context)
1119 begin = 0;
1120 else
1121 begin = from - context + 1;
1122 }
1123 else
1124 {
1125 begin = from;
1126 end = from + context - 1;
1127
1128 /* Check for wrap-around. */
1129 if (end < begin)
1130 end = ULONGEST_MAX;
1131 }
1132
1133 record_btrace_call_history_range (self, begin, end, flags);
1134 }
1135
1136 /* The to_record_is_replaying method of target record-btrace. */
1137
1138 static int
1139 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1140 {
1141 struct thread_info *tp;
1142
1143 ALL_NON_EXITED_THREADS (tp)
1144 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1145 return 1;
1146
1147 return 0;
1148 }
1149
1150 /* The to_record_will_replay method of target record-btrace. */
1151
1152 static int
1153 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1154 {
1155 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1156 }
1157
1158 /* The to_xfer_partial method of target record-btrace. */
1159
1160 static enum target_xfer_status
1161 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1162 const char *annex, gdb_byte *readbuf,
1163 const gdb_byte *writebuf, ULONGEST offset,
1164 ULONGEST len, ULONGEST *xfered_len)
1165 {
1166 struct target_ops *t;
1167
1168 /* Filter out requests that don't make sense during replay. */
1169 if (replay_memory_access == replay_memory_access_read_only
1170 && !record_btrace_generating_corefile
1171 && record_btrace_is_replaying (ops, inferior_ptid))
1172 {
1173 switch (object)
1174 {
1175 case TARGET_OBJECT_MEMORY:
1176 {
1177 struct target_section *section;
1178
1179 /* We do not allow writing memory in general. */
1180 if (writebuf != NULL)
1181 {
1182 *xfered_len = len;
1183 return TARGET_XFER_UNAVAILABLE;
1184 }
1185
1186 /* We allow reading readonly memory. */
1187 section = target_section_by_addr (ops, offset);
1188 if (section != NULL)
1189 {
1190 /* Check if the section we found is readonly. */
1191 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1192 section->the_bfd_section)
1193 & SEC_READONLY) != 0)
1194 {
1195 /* Truncate the request to fit into this section. */
1196 len = min (len, section->endaddr - offset);
1197 break;
1198 }
1199 }
1200
1201 *xfered_len = len;
1202 return TARGET_XFER_UNAVAILABLE;
1203 }
1204 }
1205 }
1206
1207 /* Forward the request. */
1208 ops = ops->beneath;
1209 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1210 offset, len, xfered_len);
1211 }
1212
1213 /* The to_insert_breakpoint method of target record-btrace. */
1214
1215 static int
1216 record_btrace_insert_breakpoint (struct target_ops *ops,
1217 struct gdbarch *gdbarch,
1218 struct bp_target_info *bp_tgt)
1219 {
1220 const char *old;
1221 int ret;
1222
1223 /* Inserting breakpoints requires accessing memory. Allow it for the
1224 duration of this function. */
1225 old = replay_memory_access;
1226 replay_memory_access = replay_memory_access_read_write;
1227
1228 ret = 0;
1229 TRY
1230 {
1231 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1232 }
1233 CATCH (except, RETURN_MASK_ALL)
1234 {
1235 replay_memory_access = old;
1236 throw_exception (except);
1237 }
1238 END_CATCH
1239 replay_memory_access = old;
1240
1241 return ret;
1242 }
1243
1244 /* The to_remove_breakpoint method of target record-btrace. */
1245
1246 static int
1247 record_btrace_remove_breakpoint (struct target_ops *ops,
1248 struct gdbarch *gdbarch,
1249 struct bp_target_info *bp_tgt)
1250 {
1251 const char *old;
1252 int ret;
1253
1254 /* Removing breakpoints requires accessing memory. Allow it for the
1255 duration of this function. */
1256 old = replay_memory_access;
1257 replay_memory_access = replay_memory_access_read_write;
1258
1259 ret = 0;
1260 TRY
1261 {
1262 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1263 }
1264 CATCH (except, RETURN_MASK_ALL)
1265 {
1266 replay_memory_access = old;
1267 throw_exception (except);
1268 }
1269 END_CATCH
1270 replay_memory_access = old;
1271
1272 return ret;
1273 }
1274
1275 /* The to_fetch_registers method of target record-btrace. */
1276
1277 static void
1278 record_btrace_fetch_registers (struct target_ops *ops,
1279 struct regcache *regcache, int regno)
1280 {
1281 struct btrace_insn_iterator *replay;
1282 struct thread_info *tp;
1283
1284 tp = find_thread_ptid (inferior_ptid);
1285 gdb_assert (tp != NULL);
1286
1287 replay = tp->btrace.replay;
1288 if (replay != NULL && !record_btrace_generating_corefile)
1289 {
1290 const struct btrace_insn *insn;
1291 struct gdbarch *gdbarch;
1292 int pcreg;
1293
1294 gdbarch = get_regcache_arch (regcache);
1295 pcreg = gdbarch_pc_regnum (gdbarch);
1296 if (pcreg < 0)
1297 return;
1298
1299 /* We can only provide the PC register. */
1300 if (regno >= 0 && regno != pcreg)
1301 return;
1302
1303 insn = btrace_insn_get (replay);
1304 gdb_assert (insn != NULL);
1305
1306 regcache_raw_supply (regcache, pcreg, &insn->pc);
1307 }
1308 else
1309 {
1310 struct target_ops *t = ops->beneath;
1311
1312 t->to_fetch_registers (t, regcache, regno);
1313 }
1314 }
1315
1316 /* The to_store_registers method of target record-btrace. */
1317
1318 static void
1319 record_btrace_store_registers (struct target_ops *ops,
1320 struct regcache *regcache, int regno)
1321 {
1322 struct target_ops *t;
1323
1324 if (!record_btrace_generating_corefile
1325 && record_btrace_is_replaying (ops, inferior_ptid))
1326 error (_("Cannot write registers while replaying."));
1327
1328 gdb_assert (may_write_registers != 0);
1329
1330 t = ops->beneath;
1331 t->to_store_registers (t, regcache, regno);
1332 }
1333
1334 /* The to_prepare_to_store method of target record-btrace. */
1335
1336 static void
1337 record_btrace_prepare_to_store (struct target_ops *ops,
1338 struct regcache *regcache)
1339 {
1340 struct target_ops *t;
1341
1342 if (!record_btrace_generating_corefile
1343 && record_btrace_is_replaying (ops, inferior_ptid))
1344 return;
1345
1346 t = ops->beneath;
1347 t->to_prepare_to_store (t, regcache);
1348 }
1349
1350 /* The branch trace frame cache. */
1351
1352 struct btrace_frame_cache
1353 {
1354 /* The thread. */
1355 struct thread_info *tp;
1356
1357 /* The frame info. */
1358 struct frame_info *frame;
1359
1360 /* The branch trace function segment. */
1361 const struct btrace_function *bfun;
1362 };
1363
1364 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1365
1366 static htab_t bfcache;
1367
1368 /* hash_f for htab_create_alloc of bfcache. */
1369
1370 static hashval_t
1371 bfcache_hash (const void *arg)
1372 {
1373 const struct btrace_frame_cache *cache = arg;
1374
1375 return htab_hash_pointer (cache->frame);
1376 }
1377
1378 /* eq_f for htab_create_alloc of bfcache. */
1379
1380 static int
1381 bfcache_eq (const void *arg1, const void *arg2)
1382 {
1383 const struct btrace_frame_cache *cache1 = arg1;
1384 const struct btrace_frame_cache *cache2 = arg2;
1385
1386 return cache1->frame == cache2->frame;
1387 }
1388
1389 /* Create a new btrace frame cache. */
1390
1391 static struct btrace_frame_cache *
1392 bfcache_new (struct frame_info *frame)
1393 {
1394 struct btrace_frame_cache *cache;
1395 void **slot;
1396
1397 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1398 cache->frame = frame;
1399
1400 slot = htab_find_slot (bfcache, cache, INSERT);
1401 gdb_assert (*slot == NULL);
1402 *slot = cache;
1403
1404 return cache;
1405 }
1406
1407 /* Extract the branch trace function from a branch trace frame. */
1408
1409 static const struct btrace_function *
1410 btrace_get_frame_function (struct frame_info *frame)
1411 {
1412 const struct btrace_frame_cache *cache;
1413 const struct btrace_function *bfun;
1414 struct btrace_frame_cache pattern;
1415 void **slot;
1416
1417 pattern.frame = frame;
1418
1419 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1420 if (slot == NULL)
1421 return NULL;
1422
1423 cache = *slot;
1424 return cache->bfun;
1425 }
1426
1427 /* Implement stop_reason method for record_btrace_frame_unwind. */
1428
1429 static enum unwind_stop_reason
1430 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1431 void **this_cache)
1432 {
1433 const struct btrace_frame_cache *cache;
1434 const struct btrace_function *bfun;
1435
1436 cache = *this_cache;
1437 bfun = cache->bfun;
1438 gdb_assert (bfun != NULL);
1439
1440 if (bfun->up == NULL)
1441 return UNWIND_UNAVAILABLE;
1442
1443 return UNWIND_NO_REASON;
1444 }
1445
1446 /* Implement this_id method for record_btrace_frame_unwind. */
1447
1448 static void
1449 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1450 struct frame_id *this_id)
1451 {
1452 const struct btrace_frame_cache *cache;
1453 const struct btrace_function *bfun;
1454 CORE_ADDR code, special;
1455
1456 cache = *this_cache;
1457
1458 bfun = cache->bfun;
1459 gdb_assert (bfun != NULL);
1460
1461 while (bfun->segment.prev != NULL)
1462 bfun = bfun->segment.prev;
1463
1464 code = get_frame_func (this_frame);
1465 special = bfun->number;
1466
1467 *this_id = frame_id_build_unavailable_stack_special (code, special);
1468
1469 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1470 btrace_get_bfun_name (cache->bfun),
1471 core_addr_to_string_nz (this_id->code_addr),
1472 core_addr_to_string_nz (this_id->special_addr));
1473 }
1474
1475 /* Implement prev_register method for record_btrace_frame_unwind. */
1476
1477 static struct value *
1478 record_btrace_frame_prev_register (struct frame_info *this_frame,
1479 void **this_cache,
1480 int regnum)
1481 {
1482 const struct btrace_frame_cache *cache;
1483 const struct btrace_function *bfun, *caller;
1484 const struct btrace_insn *insn;
1485 struct gdbarch *gdbarch;
1486 CORE_ADDR pc;
1487 int pcreg;
1488
1489 gdbarch = get_frame_arch (this_frame);
1490 pcreg = gdbarch_pc_regnum (gdbarch);
1491 if (pcreg < 0 || regnum != pcreg)
1492 throw_error (NOT_AVAILABLE_ERROR,
1493 _("Registers are not available in btrace record history"));
1494
1495 cache = *this_cache;
1496 bfun = cache->bfun;
1497 gdb_assert (bfun != NULL);
1498
1499 caller = bfun->up;
1500 if (caller == NULL)
1501 throw_error (NOT_AVAILABLE_ERROR,
1502 _("No caller in btrace record history"));
1503
1504 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1505 {
1506 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1507 pc = insn->pc;
1508 }
1509 else
1510 {
1511 insn = VEC_last (btrace_insn_s, caller->insn);
1512 pc = insn->pc;
1513
1514 pc += gdb_insn_length (gdbarch, pc);
1515 }
1516
1517 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1518 btrace_get_bfun_name (bfun), bfun->level,
1519 core_addr_to_string_nz (pc));
1520
1521 return frame_unwind_got_address (this_frame, regnum, pc);
1522 }
1523
1524 /* Implement sniffer method for record_btrace_frame_unwind. */
1525
1526 static int
1527 record_btrace_frame_sniffer (const struct frame_unwind *self,
1528 struct frame_info *this_frame,
1529 void **this_cache)
1530 {
1531 const struct btrace_function *bfun;
1532 struct btrace_frame_cache *cache;
1533 struct thread_info *tp;
1534 struct frame_info *next;
1535
1536 /* THIS_FRAME does not contain a reference to its thread. */
1537 tp = find_thread_ptid (inferior_ptid);
1538 gdb_assert (tp != NULL);
1539
1540 bfun = NULL;
1541 next = get_next_frame (this_frame);
1542 if (next == NULL)
1543 {
1544 const struct btrace_insn_iterator *replay;
1545
1546 replay = tp->btrace.replay;
1547 if (replay != NULL)
1548 bfun = replay->function;
1549 }
1550 else
1551 {
1552 const struct btrace_function *callee;
1553
1554 callee = btrace_get_frame_function (next);
1555 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1556 bfun = callee->up;
1557 }
1558
1559 if (bfun == NULL)
1560 return 0;
1561
1562 DEBUG ("[frame] sniffed frame for %s on level %d",
1563 btrace_get_bfun_name (bfun), bfun->level);
1564
1565 /* This is our frame. Initialize the frame cache. */
1566 cache = bfcache_new (this_frame);
1567 cache->tp = tp;
1568 cache->bfun = bfun;
1569
1570 *this_cache = cache;
1571 return 1;
1572 }
1573
1574 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1575
1576 static int
1577 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1578 struct frame_info *this_frame,
1579 void **this_cache)
1580 {
1581 const struct btrace_function *bfun, *callee;
1582 struct btrace_frame_cache *cache;
1583 struct frame_info *next;
1584
1585 next = get_next_frame (this_frame);
1586 if (next == NULL)
1587 return 0;
1588
1589 callee = btrace_get_frame_function (next);
1590 if (callee == NULL)
1591 return 0;
1592
1593 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1594 return 0;
1595
1596 bfun = callee->up;
1597 if (bfun == NULL)
1598 return 0;
1599
1600 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1601 btrace_get_bfun_name (bfun), bfun->level);
1602
1603 /* This is our frame. Initialize the frame cache. */
1604 cache = bfcache_new (this_frame);
1605 cache->tp = find_thread_ptid (inferior_ptid);
1606 cache->bfun = bfun;
1607
1608 *this_cache = cache;
1609 return 1;
1610 }
1611
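/* Implement the dealloc_cache method for the btrace frame unwinders. */
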
1612 static void
1613 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1614 {
1615 struct btrace_frame_cache *cache;
1616 void **slot;
1617
1618 cache = this_cache;
1619
1620 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1621 gdb_assert (slot != NULL);
1622
1623 htab_remove_elt (bfcache, cache);
1624 }
1625
1626 /* btrace recording does not store previous memory content, nor the stack
1627 frames' content. Any unwinding would return erroneous results as the stack
1628 contents no longer match the changed PC value restored from history.
1629 Therefore this unwinder reports any possibly unwound registers as
1630 <unavailable>. */
1631
1632 const struct frame_unwind record_btrace_frame_unwind =
1633 {
1634 NORMAL_FRAME,
1635 record_btrace_frame_unwind_stop_reason,
1636 record_btrace_frame_this_id,
1637 record_btrace_frame_prev_register,
1638 NULL,
1639 record_btrace_frame_sniffer,
1640 record_btrace_frame_dealloc_cache
1641 };
1642
1643 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1644 {
1645 TAILCALL_FRAME,
1646 record_btrace_frame_unwind_stop_reason,
1647 record_btrace_frame_this_id,
1648 record_btrace_frame_prev_register,
1649 NULL,
1650 record_btrace_tailcall_frame_sniffer,
1651 record_btrace_frame_dealloc_cache
1652 };
1653
1654 /* Implement the to_get_unwinder method. */
1655
1656 static const struct frame_unwind *
1657 record_btrace_to_get_unwinder (struct target_ops *self)
1658 {
1659 return &record_btrace_frame_unwind;
1660 }
1661
1662 /* Implement the to_get_tailcall_unwinder method. */
1663
1664 static const struct frame_unwind *
1665 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1666 {
1667 return &record_btrace_tailcall_frame_unwind;
1668 }
1669
1670 /* Return a human-readable string for FLAG. */
1671
1672 static const char *
1673 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1674 {
1675 switch (flag)
1676 {
1677 case BTHR_STEP:
1678 return "step";
1679
1680 case BTHR_RSTEP:
1681 return "reverse-step";
1682
1683 case BTHR_CONT:
1684 return "cont";
1685
1686 case BTHR_RCONT:
1687 return "reverse-cont";
1688
1689 case BTHR_STOP:
1690 return "stop";
1691 }
1692
1693 return "<invalid>";
1694 }
1695
1696 /* Indicate that TP should be resumed according to FLAG. */
1697
1698 static void
1699 record_btrace_resume_thread (struct thread_info *tp,
1700 enum btrace_thread_flag flag)
1701 {
1702 struct btrace_thread_info *btinfo;
1703
1704 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1705 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1706
1707 btinfo = &tp->btrace;
1708
1709 /* Fetch the latest branch trace. */
1710 btrace_fetch (tp);
1711
1712 /* A resume request overwrites a preceding resume or stop request. */
1713 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1714 btinfo->flags |= flag;
1715 }
1716
1717 /* Get the current frame for TP. */
1718
1719 static struct frame_info *
1720 get_thread_current_frame (struct thread_info *tp)
1721 {
1722 struct frame_info *frame;
1723 ptid_t old_inferior_ptid;
1724 int executing;
1725
1726 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1727 old_inferior_ptid = inferior_ptid;
1728 inferior_ptid = tp->ptid;
1729
1730 /* Clear the executing flag to allow changes to the current frame.
1731 We are not actually running, yet. We just started a reverse execution
1732 command or a record goto command.
1733 For the latter, EXECUTING is false and this has no effect.
1734 For the former, EXECUTING is true and we're in to_wait, about to
1735 move the thread. Since we need to recompute the stack, we temporarily
1736 set EXECUTING to false. */
1737 executing = is_executing (inferior_ptid);
1738 set_executing (inferior_ptid, 0);
1739
1740 frame = NULL;
1741 TRY
1742 {
1743 frame = get_current_frame ();
1744 }
1745 CATCH (except, RETURN_MASK_ALL)
1746 {
1747 /* Restore the previous execution state. */
1748 set_executing (inferior_ptid, executing);
1749
1750 /* Restore the previous inferior_ptid. */
1751 inferior_ptid = old_inferior_ptid;
1752
1753 throw_exception (except);
1754 }
1755 END_CATCH
1756
1757 /* Restore the previous execution state. */
1758 set_executing (inferior_ptid, executing);
1759
1760 /* Restore the previous inferior_ptid. */
1761 inferior_ptid = old_inferior_ptid;
1762
1763 return frame;
1764 }
1765
1766 /* Start replaying a thread. */
1767
1768 static struct btrace_insn_iterator *
1769 record_btrace_start_replaying (struct thread_info *tp)
1770 {
1771 struct btrace_insn_iterator *replay;
1772 struct btrace_thread_info *btinfo;
1773
1774 btinfo = &tp->btrace;
1775 replay = NULL;
1776
1777 /* We can't start replaying without trace. */
1778 if (btinfo->begin == NULL)
1779 return NULL;
1780
1781 /* GDB stores the current frame_id when stepping in order to detect steps
1782 into subroutines.
1783 Since frames are computed differently when we're replaying, we need to
1784 recompute those stored frames and fix them up so we can still detect
1785 subroutines after we started replaying. */
1786 TRY
1787 {
1788 struct frame_info *frame;
1789 struct frame_id frame_id;
1790 int upd_step_frame_id, upd_step_stack_frame_id;
1791
1792 /* The current frame without replaying - computed via normal unwind. */
1793 frame = get_thread_current_frame (tp);
1794 frame_id = get_frame_id (frame);
1795
1796 /* Check if we need to update any stepping-related frame id's. */
1797 upd_step_frame_id = frame_id_eq (frame_id,
1798 tp->control.step_frame_id);
1799 upd_step_stack_frame_id = frame_id_eq (frame_id,
1800 tp->control.step_stack_frame_id);
1801
1802 /* We start replaying at the end of the branch trace. This corresponds
1803 to the current instruction. */
1804 replay = XNEW (struct btrace_insn_iterator);
1805 btrace_insn_end (replay, btinfo);
1806
1807 /* Skip gaps at the end of the trace. */
1808 while (btrace_insn_get (replay) == NULL)
1809 {
1810 unsigned int steps;
1811
1812 steps = btrace_insn_prev (replay, 1);
1813 if (steps == 0)
1814 error (_("No trace."));
1815 }
1816
1817 /* We're not replaying, yet. */
1818 gdb_assert (btinfo->replay == NULL);
1819 btinfo->replay = replay;
1820
1821 /* Make sure we're not using any stale registers. */
1822 registers_changed_ptid (tp->ptid);
1823
1824 /* The current frame with replaying - computed via btrace unwind. */
1825 frame = get_thread_current_frame (tp);
1826 frame_id = get_frame_id (frame);
1827
1828 /* Replace stepping related frames where necessary. */
1829 if (upd_step_frame_id)
1830 tp->control.step_frame_id = frame_id;
1831 if (upd_step_stack_frame_id)
1832 tp->control.step_stack_frame_id = frame_id;
1833 }
1834 CATCH (except, RETURN_MASK_ALL)
1835 {
1836 xfree (btinfo->replay);
1837 btinfo->replay = NULL;
1838
1839 registers_changed_ptid (tp->ptid);
1840
1841 throw_exception (except);
1842 }
1843 END_CATCH
1844
1845 return replay;
1846 }
1847
1848 /* Stop replaying a thread. */
1849
1850 static void
1851 record_btrace_stop_replaying (struct thread_info *tp)
1852 {
1853 struct btrace_thread_info *btinfo;
1854
1855 btinfo = &tp->btrace;
1856
1857 xfree (btinfo->replay);
1858 btinfo->replay = NULL;
1859
1860 /* Make sure we're not leaving any stale registers. */
1861 registers_changed_ptid (tp->ptid);
1862 }
1863
1864 /* Stop replaying TP if it is at the end of its execution history. */
1865
1866 static void
1867 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1868 {
1869 struct btrace_insn_iterator *replay, end;
1870 struct btrace_thread_info *btinfo;
1871
1872 btinfo = &tp->btrace;
1873 replay = btinfo->replay;
1874
1875 if (replay == NULL)
1876 return;
1877
1878 btrace_insn_end (&end, btinfo);
1879
1880 if (btrace_insn_cmp (replay, &end) == 0)
1881 record_btrace_stop_replaying (tp);
1882 }
1883
1884 /* The to_resume method of target record-btrace. */
1885
1886 static void
1887 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1888 enum gdb_signal signal)
1889 {
1890 struct thread_info *tp;
1891 enum btrace_thread_flag flag, cflag;
1892
1893 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1894 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1895 step ? "step" : "cont");
1896
1897 /* Store the execution direction of the last resume.
1898
1899 If there is more than one to_resume call, we have to rely on infrun
1900 to not change the execution direction in-between. */
1901 record_btrace_resume_exec_dir = execution_direction;
1902
1903 /* As long as we're not replaying, just forward the request.
1904
1905 For non-stop targets this means that no thread is replaying. In order to
1906 make progress, we may need to explicitly move replaying threads to the end
1907 of their execution history. */
1908 if ((execution_direction != EXEC_REVERSE)
1909 && !record_btrace_is_replaying (ops, minus_one_ptid))
1910 {
1911 ops = ops->beneath;
1912 return ops->to_resume (ops, ptid, step, signal);
1913 }
1914
1915 /* Compute the btrace thread flag for the requested move. */
1916 if (execution_direction == EXEC_REVERSE)
1917 {
1918 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
1919 cflag = BTHR_RCONT;
1920 }
1921 else
1922 {
1923 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
1924 cflag = BTHR_CONT;
1925 }
1926
1927 /* We just indicate the resume intent here. The actual stepping happens in
1928 record_btrace_wait below.
1929
1930 For all-stop targets, we only step INFERIOR_PTID and continue others. */
1931 if (!target_is_non_stop_p ())
1932 {
1933 gdb_assert (ptid_match (inferior_ptid, ptid));
1934
1935 ALL_NON_EXITED_THREADS (tp)
1936 if (ptid_match (tp->ptid, ptid))
1937 {
1938 if (ptid_match (tp->ptid, inferior_ptid))
1939 record_btrace_resume_thread (tp, flag);
1940 else
1941 record_btrace_resume_thread (tp, cflag);
1942 }
1943 }
1944 else
1945 {
1946 ALL_NON_EXITED_THREADS (tp)
1947 if (ptid_match (tp->ptid, ptid))
1948 record_btrace_resume_thread (tp, flag);
1949 }
1950
1951 /* Async support. */
1952 if (target_can_async_p ())
1953 {
1954 target_async (1);
1955 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1956 }
1957 }
1958
1959 /* Cancel resuming TP. */
1960
1961 static void
1962 record_btrace_cancel_resume (struct thread_info *tp)
1963 {
1964 enum btrace_thread_flag flags;
1965
1966 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1967 if (flags == 0)
1968 return;
1969
1970 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1971 target_pid_to_str (tp->ptid), flags,
1972 btrace_thread_flag_to_str (flags));
1973
1974 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1975 record_btrace_stop_replaying_at_end (tp);
1976 }
1977
1978 /* Return a target_waitstatus indicating that we ran out of history. */
1979
1980 static struct target_waitstatus
1981 btrace_step_no_history (void)
1982 {
1983 struct target_waitstatus status;
1984
1985 status.kind = TARGET_WAITKIND_NO_HISTORY;
1986
1987 return status;
1988 }
1989
1990 /* Return a target_waitstatus indicating that a step finished. */
1991
1992 static struct target_waitstatus
1993 btrace_step_stopped (void)
1994 {
1995 struct target_waitstatus status;
1996
1997 status.kind = TARGET_WAITKIND_STOPPED;
1998 status.value.sig = GDB_SIGNAL_TRAP;
1999
2000 return status;
2001 }
2002
2003 /* Return a target_waitstatus indicating that a thread was stopped as
2004 requested. */
2005
2006 static struct target_waitstatus
2007 btrace_step_stopped_on_request (void)
2008 {
2009 struct target_waitstatus status;
2010
2011 status.kind = TARGET_WAITKIND_STOPPED;
2012 status.value.sig = GDB_SIGNAL_0;
2013
2014 return status;
2015 }
2016
2017 /* Return a target_waitstatus indicating a spurious stop. */
2018
2019 static struct target_waitstatus
2020 btrace_step_spurious (void)
2021 {
2022 struct target_waitstatus status;
2023
2024 status.kind = TARGET_WAITKIND_SPURIOUS;
2025
2026 return status;
2027 }
2028
2029 /* Return a target_waitstatus indicating that the thread was not resumed. */
2030
2031 static struct target_waitstatus
2032 btrace_step_no_resumed (void)
2033 {
2034 struct target_waitstatus status;
2035
2036 status.kind = TARGET_WAITKIND_NO_RESUMED;
2037
2038 return status;
2039 }
2040
2041 /* Return a target_waitstatus indicating that we should wait again. */
2042
2043 static struct target_waitstatus
2044 btrace_step_again (void)
2045 {
2046 struct target_waitstatus status;
2047
2048 status.kind = TARGET_WAITKIND_IGNORE;
2049
2050 return status;
2051 }
2052
2053 /* Clear the record histories. */
2054
2055 static void
2056 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2057 {
2058 xfree (btinfo->insn_history);
2059 xfree (btinfo->call_history);
2060
2061 btinfo->insn_history = NULL;
2062 btinfo->call_history = NULL;
2063 }
2064
2065 /* Check whether TP's current replay position is at a breakpoint. */
2066
2067 static int
2068 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2069 {
2070 struct btrace_insn_iterator *replay;
2071 struct btrace_thread_info *btinfo;
2072 const struct btrace_insn *insn;
2073 struct inferior *inf;
2074
2075 btinfo = &tp->btrace;
2076 replay = btinfo->replay;
2077
2078 if (replay == NULL)
2079 return 0;
2080
2081 insn = btrace_insn_get (replay);
2082 if (insn == NULL)
2083 return 0;
2084
2085 inf = find_inferior_ptid (tp->ptid);
2086 if (inf == NULL)
2087 return 0;
2088
2089 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2090 &btinfo->stop_reason);
2091 }
2092
2093 /* Step one instruction in forward direction. */
2094
2095 static struct target_waitstatus
2096 record_btrace_single_step_forward (struct thread_info *tp)
2097 {
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2103
2104 /* We're done if we're not replaying. */
2105 if (replay == NULL)
2106 return btrace_step_no_history ();
2107
2108 /* Check if we're stepping a breakpoint. */
2109 if (record_btrace_replay_at_breakpoint (tp))
2110 return btrace_step_stopped ();
2111
2112 /* Skip gaps during replay. */
2113 do
2114 {
2115 unsigned int steps;
2116
2117 /* We will bail out here if we continue stepping after reaching the end
2118 of the execution history. */
2119 steps = btrace_insn_next (replay, 1);
2120 if (steps == 0)
2121 return btrace_step_no_history ();
2122 }
2123 while (btrace_insn_get (replay) == NULL);
2124
2125 /* Determine the end of the instruction trace. */
2126 btrace_insn_end (&end, btinfo);
2127
2128 /* The execution trace contains (and ends with) the current instruction.
2129 This instruction has not been executed, yet, so the trace really ends
2130 one instruction earlier. */
2131 if (btrace_insn_cmp (replay, &end) == 0)
2132 return btrace_step_no_history ();
2133
2134 return btrace_step_spurious ();
2135 }
2136
2137 /* Step one instruction in backward direction. */
2138
2139 static struct target_waitstatus
2140 record_btrace_single_step_backward (struct thread_info *tp)
2141 {
2142 struct btrace_insn_iterator *replay;
2143 struct btrace_thread_info *btinfo;
2144
2145 btinfo = &tp->btrace;
2146 replay = btinfo->replay;
2147
2148 /* Start replaying if we're not already doing so. */
2149 if (replay == NULL)
2150 replay = record_btrace_start_replaying (tp);
2151
2152 /* If we can't step any further, we have reached the end of the history.
2153 Skip gaps during replay. */
2154 do
2155 {
2156 unsigned int steps;
2157
2158 steps = btrace_insn_prev (replay, 1);
2159 if (steps == 0)
2160 return btrace_step_no_history ();
2161 }
2162 while (btrace_insn_get (replay) == NULL);
2163
2164 /* Check if we're stepping a breakpoint.
2165
2166 For reverse-stepping, this check is after the step. There is logic in
2167 infrun.c that handles reverse-stepping separately. See, for example,
2168 proceed and adjust_pc_after_break.
2169
2170 This code assumes that for reverse-stepping, PC points to the last
2171 de-executed instruction, whereas for forward-stepping PC points to the
2172 next to-be-executed instruction. */
2173 if (record_btrace_replay_at_breakpoint (tp))
2174 return btrace_step_stopped ();
2175
2176 return btrace_step_spurious ();
2177 }
2178
2179 /* Step a single thread. */
2180
2181 static struct target_waitstatus
2182 record_btrace_step_thread (struct thread_info *tp)
2183 {
2184 struct btrace_thread_info *btinfo;
2185 struct target_waitstatus status;
2186 enum btrace_thread_flag flags;
2187
2188 btinfo = &tp->btrace;
2189
2190 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2191 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2192
2193 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2194 target_pid_to_str (tp->ptid), flags,
2195 btrace_thread_flag_to_str (flags));
2196
2197 /* We can't step without an execution history. */
2198 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2199 return btrace_step_no_history ();
2200
2201 switch (flags)
2202 {
2203 default:
2204 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2205
2206 case BTHR_STOP:
2207 return btrace_step_stopped_on_request ();
2208
2209 case BTHR_STEP:
2210 status = record_btrace_single_step_forward (tp);
2211 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2212 break;
2213
2214 return btrace_step_stopped ();
2215
2216 case BTHR_RSTEP:
2217 status = record_btrace_single_step_backward (tp);
2218 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2219 break;
2220
2221 return btrace_step_stopped ();
2222
2223 case BTHR_CONT:
2224 status = record_btrace_single_step_forward (tp);
2225 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2226 break;
2227
2228 btinfo->flags |= flags;
2229 return btrace_step_again ();
2230
2231 case BTHR_RCONT:
2232 status = record_btrace_single_step_backward (tp);
2233 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2234 break;
2235
2236 btinfo->flags |= flags;
2237 return btrace_step_again ();
2238 }
2239
2240 /* We keep threads moving at the end of their execution history. The to_wait
2241 method will stop the thread for which the event is reported. */
2242 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2243 btinfo->flags |= flags;
2244
2245 return status;
2246 }
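
/* To summarize, the stepping flags map to waitstatus values roughly as
   follows:

     BTHR_STOP             -> TARGET_WAITKIND_STOPPED (stop on request)
     BTHR_STEP, BTHR_RSTEP -> one step, then TARGET_WAITKIND_STOPPED
     BTHR_CONT, BTHR_RCONT -> one step, then TARGET_WAITKIND_IGNORE to
                              request another round of stepping
     end of history        -> TARGET_WAITKIND_NO_HISTORY

   Breakpoint hits and spurious stops are detected by the single-step
   helpers above and reported via the non-spurious status instead.  */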
2247
2248 /* A vector of threads. */
2249
2250 typedef struct thread_info * tp_t;
2251 DEF_VEC_P (tp_t);
2252
2253 /* Announce further events if necessary. */
2254
2255 static void
2256 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2257 const VEC (tp_t) *no_history)
2258 {
2259 int more_moving, more_no_history;
2260
2261 more_moving = !VEC_empty (tp_t, moving);
2262 more_no_history = !VEC_empty (tp_t, no_history);
2263
2264 if (!more_moving && !more_no_history)
2265 return;
2266
2267 if (more_moving)
2268 DEBUG ("movers pending");
2269
2270 if (more_no_history)
2271 DEBUG ("no-history pending");
2272
2273 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2274 }
2275
2276 /* The to_wait method of target record-btrace. */
2277
2278 static ptid_t
2279 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2280 struct target_waitstatus *status, int options)
2281 {
2282 VEC (tp_t) *moving, *no_history;
2283 struct thread_info *tp, *eventing;
2284 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2285
2286 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2287
2288 /* As long as we're not replaying, just forward the request. */
2289 if ((execution_direction != EXEC_REVERSE)
2290 && !record_btrace_is_replaying (ops, minus_one_ptid))
2291 {
2292 ops = ops->beneath;
2293 return ops->to_wait (ops, ptid, status, options);
2294 }
2295
2296 moving = NULL;
2297 no_history = NULL;
2298
2299 make_cleanup (VEC_cleanup (tp_t), &moving);
2300 make_cleanup (VEC_cleanup (tp_t), &no_history);
2301
2302 /* Keep a work list of moving threads. */
2303 ALL_NON_EXITED_THREADS (tp)
2304 if (ptid_match (tp->ptid, ptid)
2305 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2306 VEC_safe_push (tp_t, moving, tp);
2307
2308 if (VEC_empty (tp_t, moving))
2309 {
2310 *status = btrace_step_no_resumed ();
2311
2312 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2313 target_waitstatus_to_string (status));
2314
2315 do_cleanups (cleanups);
2316 return null_ptid;
2317 }
2318
2319 /* Step moving threads one by one, one step each, until either one thread
2320 reports an event or we run out of threads to step.
2321
2322 When stepping more than one thread, chances are that some threads reach
2323 the end of their execution history earlier than others. If we reported
2324 this immediately, all-stop on top of non-stop would stop all threads and
2325 resume the same threads next time. And we would report the same thread
2326 having reached the end of its execution history again.
2327
2328 In the worst case, this would starve the other threads. But even if the
2329 other threads were allowed to make progress, this would result in far too
2330 many intermediate stops.
2331
2332 We therefore delay the reporting of "no execution history" until we have
2333 nothing else to report. By this time, all threads should have moved to
2334 either the beginning or the end of their execution history. There will
2335 be a single user-visible stop. */
2336 eventing = NULL;
2337 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2338 {
2339 unsigned int ix;
2340
2341 ix = 0;
2342 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2343 {
2344 *status = record_btrace_step_thread (tp);
2345
2346 switch (status->kind)
2347 {
2348 case TARGET_WAITKIND_IGNORE:
2349 ix++;
2350 break;
2351
2352 case TARGET_WAITKIND_NO_HISTORY:
2353 VEC_safe_push (tp_t, no_history,
2354 VEC_ordered_remove (tp_t, moving, ix));
2355 break;
2356
2357 default:
2358 eventing = VEC_unordered_remove (tp_t, moving, ix);
2359 break;
2360 }
2361 }
2362 }
2363
2364 if (eventing == NULL)
2365 {
2366 /* We started with at least one moving thread. This thread must have
2367 either stopped or reached the end of its execution history.
2368
2369 In the former case, EVENTING must not be NULL.
2370 In the latter case, NO_HISTORY must not be empty. */
2371 gdb_assert (!VEC_empty (tp_t, no_history));
2372
2373 /* We kept threads moving at the end of their execution history. Stop
2374 EVENTING now that we are going to report its stop. */
2375 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2376 eventing->btrace.flags &= ~BTHR_MOVE;
2377
2378 *status = btrace_step_no_history ();
2379 }
2380
2381 gdb_assert (eventing != NULL);
2382
2383 /* We kept threads replaying at the end of their execution history. Stop
2384 replaying EVENTING now that we are going to report its stop. */
2385 record_btrace_stop_replaying_at_end (eventing);
2386
2387 /* Stop all other threads. */
2388 if (!target_is_non_stop_p ())
2389 ALL_NON_EXITED_THREADS (tp)
2390 record_btrace_cancel_resume (tp);
2391
2392 /* In async mode, we need to announce further events. */
2393 if (target_is_async_p ())
2394 record_btrace_maybe_mark_async_event (moving, no_history);
2395
2396 /* Start record histories anew from the current position. */
2397 record_btrace_clear_histories (&eventing->btrace);
2398
2399 /* We moved the replay position but did not update registers. */
2400 registers_changed_ptid (eventing->ptid);
2401
2402 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2403 target_pid_to_str (eventing->ptid),
2404 target_waitstatus_to_string (status));
2405
2406 do_cleanups (cleanups);
2407 return eventing->ptid;
2408 }
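
/* A worked example: suppose threads A and B are continued in reverse and
   B reaches the beginning of its history first.  B is set aside on the
   NO_HISTORY list while A keeps stepping.  Only when A stops as well (or
   also runs out of history) is a single event reported; the other
   threads are then cancelled in all-stop mode, and further pending
   events are announced through the async event handler in async mode.  */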
2409
2410 /* The to_stop method of target record-btrace. */
2411
2412 static void
2413 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2414 {
2415 DEBUG ("stop %s", target_pid_to_str (ptid));
2416
2417 /* As long as we're not replaying, just forward the request. */
2418 if ((execution_direction != EXEC_REVERSE)
2419 && !record_btrace_is_replaying (ops, minus_one_ptid))
2420 {
2421 ops = ops->beneath;
2422 ops->to_stop (ops, ptid);
2423 }
2424 else
2425 {
2426 struct thread_info *tp;
2427
2428 ALL_NON_EXITED_THREADS (tp)
2429 if (ptid_match (tp->ptid, ptid))
2430 {
2431 tp->btrace.flags &= ~BTHR_MOVE;
2432 tp->btrace.flags |= BTHR_STOP;
2433 }
2434 }
2435 }
2436
2437 /* The to_can_execute_reverse method of target record-btrace. */
2438
2439 static int
2440 record_btrace_can_execute_reverse (struct target_ops *self)
2441 {
2442 return 1;
2443 }
2444
2445 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2446
2447 static int
2448 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2449 {
2450 if (record_btrace_is_replaying (ops, minus_one_ptid))
2451 {
2452 struct thread_info *tp = inferior_thread ();
2453
2454 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2455 }
2456
2457 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2458 }
2459
2460 /* The to_supports_stopped_by_sw_breakpoint method of target
2461 record-btrace. */
2462
2463 static int
2464 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2465 {
2466 if (record_btrace_is_replaying (ops, minus_one_ptid))
2467 return 1;
2468
2469 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2470 }
2471
2472 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2473
2474 static int
2475 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2476 {
2477 if (record_btrace_is_replaying (ops, minus_one_ptid))
2478 {
2479 struct thread_info *tp = inferior_thread ();
2480
2481 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2482 }
2483
2484 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2485 }
2486
2487 /* The to_supports_stopped_by_hw_breakpoint method of target
2488 record-btrace. */
2489
2490 static int
2491 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2492 {
2493 if (record_btrace_is_replaying (ops, minus_one_ptid))
2494 return 1;
2495
2496 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2497 }
2498
2499 /* The to_update_thread_list method of target record-btrace. */
2500
2501 static void
2502 record_btrace_update_thread_list (struct target_ops *ops)
2503 {
2504 /* We don't add or remove threads during replay. */
2505 if (record_btrace_is_replaying (ops, minus_one_ptid))
2506 return;
2507
2508 /* Forward the request. */
2509 ops = ops->beneath;
2510 ops->to_update_thread_list (ops);
2511 }
2512
2513 /* The to_thread_alive method of target record-btrace. */
2514
2515 static int
2516 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2517 {
2518 /* We don't add or remove threads during replay. */
2519 if (record_btrace_is_replaying (ops, minus_one_ptid))
2520 return find_thread_ptid (ptid) != NULL;
2521
2522 /* Forward the request. */
2523 ops = ops->beneath;
2524 return ops->to_thread_alive (ops, ptid);
2525 }
2526
2527 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2528 is stopped. */
2529
2530 static void
2531 record_btrace_set_replay (struct thread_info *tp,
2532 const struct btrace_insn_iterator *it)
2533 {
2534 struct btrace_thread_info *btinfo;
2535
2536 btinfo = &tp->btrace;
2537
2538 if (it == NULL || it->function == NULL)
2539 record_btrace_stop_replaying (tp);
2540 else
2541 {
2542 if (btinfo->replay == NULL)
2543 record_btrace_start_replaying (tp);
2544 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2545 return;
2546
2547 *btinfo->replay = *it;
2548 registers_changed_ptid (tp->ptid);
2549 }
2550
2551 /* Start anew from the new replay position. */
2552 record_btrace_clear_histories (btinfo);
2553
2554 stop_pc = regcache_read_pc (get_current_regcache ());
2555 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2556 }
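
/* Two details above are easy to miss: the record histories must be
   cleared because "record instruction-history" and "record
   function-call-history" are shown relative to the current replay
   position, and registers_changed_ptid must be called because moving
   the replay position invalidates the cached register contents for
   that thread.  */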
2557
2558 /* The to_goto_record_begin method of target record-btrace. */
2559
2560 static void
2561 record_btrace_goto_begin (struct target_ops *self)
2562 {
2563 struct thread_info *tp;
2564 struct btrace_insn_iterator begin;
2565
2566 tp = require_btrace_thread ();
2567
2568 btrace_insn_begin (&begin, &tp->btrace);
2569 record_btrace_set_replay (tp, &begin);
2570 }
2571
2572 /* The to_goto_record_end method of target record-btrace. */
2573
2574 static void
2575 record_btrace_goto_end (struct target_ops *ops)
2576 {
2577 struct thread_info *tp;
2578
2579 tp = require_btrace_thread ();
2580
2581 record_btrace_set_replay (tp, NULL);
2582 }
2583
2584 /* The to_goto_record method of target record-btrace. */
2585
2586 static void
2587 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2588 {
2589 struct thread_info *tp;
2590 struct btrace_insn_iterator it;
2591 unsigned int number;
2592 int found;
2593
2594 number = insn;
2595
2596 /* Check for wrap-arounds. */
2597 if (number != insn)
2598 error (_("Instruction number out of range."));
2599
2600 tp = require_btrace_thread ();
2601
2602 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2603 if (found == 0)
2604 error (_("No such instruction."));
2605
2606 record_btrace_set_replay (tp, &it);
2607 }
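
/* The wrap-around check above guards the narrowing conversion from
   ULONGEST to unsigned int.  The same idiom in a minimal sketch:

     unsigned int number = insn;

     if (number != insn)
       ... INSN does not fit into NUMBER; reject it ...

   i.e. convert first, then compare against the original wider value.  */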
2608
2609 /* The to_record_stop_replaying method of target record-btrace. */
2610
2611 static void
2612 record_btrace_stop_replaying_all (struct target_ops *self)
2613 {
2614 struct thread_info *tp;
2615
2616 ALL_NON_EXITED_THREADS (tp)
2617 record_btrace_stop_replaying (tp);
2618 }
2619
2620 /* The to_execution_direction target method. */
2621
2622 static enum exec_direction_kind
2623 record_btrace_execution_direction (struct target_ops *self)
2624 {
2625 return record_btrace_resume_exec_dir;
2626 }
2627
2628 /* The to_prepare_to_generate_core target method. */
2629
2630 static void
2631 record_btrace_prepare_to_generate_core (struct target_ops *self)
2632 {
2633 record_btrace_generating_corefile = 1;
2634 }
2635
2636 /* The to_done_generating_core target method. */
2637
2638 static void
2639 record_btrace_done_generating_core (struct target_ops *self)
2640 {
2641 record_btrace_generating_corefile = 0;
2642 }
2643
2644 /* Initialize the record-btrace target ops. */
2645
2646 static void
2647 init_record_btrace_ops (void)
2648 {
2649 struct target_ops *ops;
2650
2651 ops = &record_btrace_ops;
2652 ops->to_shortname = "record-btrace";
2653 ops->to_longname = "Branch tracing target";
2654 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2655 ops->to_open = record_btrace_open;
2656 ops->to_close = record_btrace_close;
2657 ops->to_async = record_btrace_async;
2658 ops->to_detach = record_detach;
2659 ops->to_disconnect = record_disconnect;
2660 ops->to_mourn_inferior = record_mourn_inferior;
2661 ops->to_kill = record_kill;
2662 ops->to_stop_recording = record_btrace_stop_recording;
2663 ops->to_info_record = record_btrace_info;
2664 ops->to_insn_history = record_btrace_insn_history;
2665 ops->to_insn_history_from = record_btrace_insn_history_from;
2666 ops->to_insn_history_range = record_btrace_insn_history_range;
2667 ops->to_call_history = record_btrace_call_history;
2668 ops->to_call_history_from = record_btrace_call_history_from;
2669 ops->to_call_history_range = record_btrace_call_history_range;
2670 ops->to_record_is_replaying = record_btrace_is_replaying;
2671 ops->to_record_will_replay = record_btrace_will_replay;
2672 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2673 ops->to_xfer_partial = record_btrace_xfer_partial;
2674 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2675 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2676 ops->to_fetch_registers = record_btrace_fetch_registers;
2677 ops->to_store_registers = record_btrace_store_registers;
2678 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2679 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2680 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2681 ops->to_resume = record_btrace_resume;
2682 ops->to_wait = record_btrace_wait;
2683 ops->to_stop = record_btrace_stop;
2684 ops->to_update_thread_list = record_btrace_update_thread_list;
2685 ops->to_thread_alive = record_btrace_thread_alive;
2686 ops->to_goto_record_begin = record_btrace_goto_begin;
2687 ops->to_goto_record_end = record_btrace_goto_end;
2688 ops->to_goto_record = record_btrace_goto;
2689 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2690 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2691 ops->to_supports_stopped_by_sw_breakpoint
2692 = record_btrace_supports_stopped_by_sw_breakpoint;
2693 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2694 ops->to_supports_stopped_by_hw_breakpoint
2695 = record_btrace_supports_stopped_by_hw_breakpoint;
2696 ops->to_execution_direction = record_btrace_execution_direction;
2697 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2698 ops->to_done_generating_core = record_btrace_done_generating_core;
2699 ops->to_stratum = record_stratum;
2700 ops->to_magic = OPS_MAGIC;
2701 }
2702
2703 /* Start recording in BTS format. */
2704
2705 static void
2706 cmd_record_btrace_bts_start (char *args, int from_tty)
2707 {
2708 if (args != NULL && *args != 0)
2709 error (_("Invalid argument."));
2710
2711 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2712
2713 TRY
2714 {
2715 execute_command ("target record-btrace", from_tty);
2716 }
2717 CATCH (exception, RETURN_MASK_ALL)
2718 {
2719 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2720 throw_exception (exception);
2721 }
2722 END_CATCH
2723 }
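
/* Note the TRY/CATCH pattern: the format is configured before "target
   record-btrace" is executed and reset to BTRACE_FORMAT_NONE if opening
   the target fails, so a failed attempt leaves no stale configuration
   behind.  The pt variant below follows the same pattern.  */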
2724
2725 /* Start recording in Intel(R) Processor Trace format. */
2726
2727 static void
2728 cmd_record_btrace_pt_start (char *args, int from_tty)
2729 {
2730 if (args != NULL && *args != 0)
2731 error (_("Invalid argument."));
2732
2733 record_btrace_conf.format = BTRACE_FORMAT_PT;
2734
2735 TRY
2736 {
2737 execute_command ("target record-btrace", from_tty);
2738 }
2739 CATCH (exception, RETURN_MASK_ALL)
2740 {
2741 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2742 throw_exception (exception);
2743 }
2744 END_CATCH
2745 }
2746
2747 /* The "record btrace" command: try the PT format first, then fall back to BTS. */
2748
2749 static void
2750 cmd_record_btrace_start (char *args, int from_tty)
2751 {
2752 if (args != NULL && *args != 0)
2753 error (_("Invalid argument."));
2754
2755 record_btrace_conf.format = BTRACE_FORMAT_PT;
2756
2757 TRY
2758 {
2759 execute_command ("target record-btrace", from_tty);
2760 }
2761 CATCH (exception, RETURN_MASK_ALL)
2762 {
2763 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2764
2765 TRY
2766 {
2767 execute_command ("target record-btrace", from_tty);
2768 }
2769 CATCH (exception, RETURN_MASK_ALL)
2770 {
2771 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2772 throw_exception (exception);
2773 }
2774 END_CATCH
2775 }
2776 END_CATCH
2777 }
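
/* So a plain "record btrace" prefers the PT format and silently falls
   back to BTS; only if both attempts fail is the exception propagated
   to the user.  In pseudo-code:

     try PT;
     on failure: try BTS;
     on failure: reset the format and re-throw.  */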
2778
2779 /* The "set record btrace" command. */
2780
2781 static void
2782 cmd_set_record_btrace (char *args, int from_tty)
2783 {
2784 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2785 }
2786
2787 /* The "show record btrace" command. */
2788
2789 static void
2790 cmd_show_record_btrace (char *args, int from_tty)
2791 {
2792 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2793 }
2794
2795 /* The "show record btrace replay-memory-access" command. */
2796
2797 static void
2798 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2799 struct cmd_list_element *c, const char *value)
2800 {
2801 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2802 replay_memory_access);
2803 }
2804
2805 /* The "set record btrace bts" command. */
2806
2807 static void
2808 cmd_set_record_btrace_bts (char *args, int from_tty)
2809 {
2810 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2811 "by an appropriate subcommand.\n"));
2812 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2813 all_commands, gdb_stdout);
2814 }
2815
2816 /* The "show record btrace bts" command. */
2817
2818 static void
2819 cmd_show_record_btrace_bts (char *args, int from_tty)
2820 {
2821 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2822 }
2823
2824 /* The "set record btrace pt" command. */
2825
2826 static void
2827 cmd_set_record_btrace_pt (char *args, int from_tty)
2828 {
2829 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2830 "by an appropriate subcommand.\n"));
2831 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2832 all_commands, gdb_stdout);
2833 }
2834
2835 /* The "show record btrace pt" command. */
2836
2837 static void
2838 cmd_show_record_btrace_pt (char *args, int from_tty)
2839 {
2840 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2841 }
2842
2843 /* The "record bts buffer-size" show value function. */
2844
2845 static void
2846 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2847 struct cmd_list_element *c,
2848 const char *value)
2849 {
2850 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2851 value);
2852 }
2853
2854 /* The "record pt buffer-size" show value function. */
2855
2856 static void
2857 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2858 struct cmd_list_element *c,
2859 const char *value)
2860 {
2861 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2862 value);
2863 }
2864
2865 void _initialize_record_btrace (void);
2866
2867 /* Initialize btrace commands. */
2868
2869 void
2870 _initialize_record_btrace (void)
2871 {
2872 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2873 _("Start branch trace recording."), &record_btrace_cmdlist,
2874 "record btrace ", 0, &record_cmdlist);
2875 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2876
2877 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2878 _("\
2879 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2880 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2881 This format may not be available on all processors."),
2882 &record_btrace_cmdlist);
2883 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2884
2885 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2886 _("\
2887 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2888 This format may not be available on all processors."),
2889 &record_btrace_cmdlist);
2890 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2891
2892 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2893 _("Set record options"), &set_record_btrace_cmdlist,
2894 "set record btrace ", 0, &set_record_cmdlist);
2895
2896 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2897 _("Show record options"), &show_record_btrace_cmdlist,
2898 "show record btrace ", 0, &show_record_cmdlist);
2899
2900 add_setshow_enum_cmd ("replay-memory-access", no_class,
2901 replay_memory_access_types, &replay_memory_access, _("\
2902 Set what memory accesses are allowed during replay."), _("\
2903 Show what memory accesses are allowed during replay."),
2904 _("Default is READ-ONLY.\n\n\
2905 The btrace record target does not trace data.\n\
2906 The memory therefore corresponds to the live target and not \
2907 to the current replay position.\n\n\
2908 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2909 When READ-WRITE, allow accesses to read-only and read-write memory during \
2910 replay."),
2911 NULL, cmd_show_replay_memory_access,
2912 &set_record_btrace_cmdlist,
2913 &show_record_btrace_cmdlist);
2914
2915 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2916 _("Set record btrace bts options"),
2917 &set_record_btrace_bts_cmdlist,
2918 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2919
2920 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2921 _("Show record btrace bts options"),
2922 &show_record_btrace_bts_cmdlist,
2923 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2924
2925 add_setshow_uinteger_cmd ("buffer-size", no_class,
2926 &record_btrace_conf.bts.size,
2927 _("Set the record/replay bts buffer size."),
2928 _("Show the record/replay bts buffer size."), _("\
2929 When starting recording, request a trace buffer of this size. \
2930 The actual buffer size may differ from the requested size. \
2931 Use \"info record\" to see the actual buffer size.\n\n\
2932 Bigger buffers allow longer recording but also take more time to process \
2933 the recorded execution trace.\n\n\
2934 The trace buffer size may not be changed while recording."), NULL,
2935 show_record_bts_buffer_size_value,
2936 &set_record_btrace_bts_cmdlist,
2937 &show_record_btrace_bts_cmdlist);
2938
2939 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2940 _("Set record btrace pt options"),
2941 &set_record_btrace_pt_cmdlist,
2942 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2943
2944 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2945 _("Show record btrace pt options"),
2946 &show_record_btrace_pt_cmdlist,
2947 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2948
2949 add_setshow_uinteger_cmd ("buffer-size", no_class,
2950 &record_btrace_conf.pt.size,
2951 _("Set the record/replay pt buffer size."),
2952 _("Show the record/replay pt buffer size."), _("\
2953 Bigger buffers allow longer recording but also take more time to process \
2954 the recorded execution trace.\n\
2955 The actual buffer size may differ from the requested size. Use \"info record\" \
2956 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2957 &set_record_btrace_pt_cmdlist,
2958 &show_record_btrace_pt_cmdlist);
2959
2960 init_record_btrace_ops ();
2961 add_target (&record_btrace_ops);
2962
2963 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2964 xcalloc, xfree);
2965
2966 record_btrace_conf.bts.size = 64 * 1024;
2967 record_btrace_conf.pt.size = 16 * 1024;
2968 }
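
/* For orientation, the commands registered above form the user interface
   of this target, for example:

     record btrace                  (alias: record b)
     record btrace bts              (alias: record bts)
     record btrace pt               (alias: record pt)
     set record btrace replay-memory-access read-write
     set record btrace bts buffer-size 65536
     set record btrace pt buffer-size 16384

   The buffer sizes shown are the defaults configured just above; the
   actual buffer size may differ, see "info record".  */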