gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* A new-thread observer enabling branch tracing for new threads. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
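/* A minimal usage sketch (hypothetical call site, not taken from this
file):

     DEBUG ("resume %s", target_pid_to_str (ptid));

With "set debug record 1" in effect this prints
"[record-btrace] resume <ptid>" to gdb_stdlog; with record_debug == 0
nothing is printed. The do/while (0) wrapper makes the expansion a single
statement, so the macro composes safely with unbraced if/else. */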
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
172 /* Enable automatic tracing of new threads. */
173
174 static void
175 record_btrace_auto_enable (void)
176 {
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181 }
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188 /* The observer may already have been detached. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
198 /* The record-btrace async event handler function. */
199
200 static void
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
202 {
203 inferior_event_handler (INF_REG_EVENT, NULL);
204 }
205
206 /* See record-btrace.h. */
207
208 void
209 record_btrace_push_target (void)
210 {
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 }
225
226 /* The to_open method of target record-btrace. */
227
228 static void
229 record_btrace_open (const char *args, int from_tty)
230 {
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
236 record_preopen ();
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
246 {
247 btrace_enable (tp, &record_btrace_conf);
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
252 record_btrace_push_target ();
253
254 discard_cleanups (disable_chain);
255 }
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
273 /* The to_disconnect method of target record-btrace. */
274
275 static void
276 record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278 {
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286 }
287
288 /* The to_close method of target record-btrace. */
289
290 static void
291 record_btrace_close (struct target_ops *self)
292 {
293 struct thread_info *tp;
294
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
306 }
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
321 /* Adjusts *SIZE and returns a human-readable size suffix. */
322
323 static const char *
324 record_btrace_adjust_size (unsigned int *size)
325 {
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347 }
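/* Two worked examples for the conversion above: a size of 4194304
(4 << 20) has its low 20 bits clear, so it is scaled to 4 and "MB" is
returned; a size of 2560 is not an exact multiple of 1024, so it is left
unchanged and "" is returned. */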
348
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404 }
405
406 /* The to_info_record method of target record-btrace. */
407
408 static void
409 record_btrace_info (struct target_ops *self)
410 {
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
422 validate_registers_access ();
423
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
428 record_btrace_print_conf (conf);
429
430 btrace_fetch (tp);
431
432 insns = 0;
433 calls = 0;
434 gaps = 0;
435
436 if (!btrace_is_empty (tp))
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
444
445 btrace_insn_end (&insn, btinfo);
446
447 insns = btrace_insn_number (&insn);
448 if (insns != 0)
449 {
450 /* The last instruction does not really belong to the trace. */
451 insns -= 1;
452 }
453 else
454 {
455 unsigned int steps;
456
457 /* Skip gaps at the end. */
458 do
459 {
460 steps = btrace_insn_prev (&insn, 1);
461 if (steps == 0)
462 break;
463
464 insns = btrace_insn_number (&insn);
465 }
466 while (insns == 0);
467 }
468
469 gaps = btinfo->ngaps;
470 }
471
472 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
473 "for thread %s (%s).\n"), insns, calls, gaps,
474 print_thread_id (tp), target_pid_to_str (tp->ptid));
475
476 if (btrace_is_replaying (tp))
477 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
478 btrace_insn_number (btinfo->replay));
479 }
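/* With illustrative numbers, the summary printed above reads:

     Recorded 132 instructions in 7 functions (0 gaps) for thread 1
     (process 1234). */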
480
481 /* Print a decode error. */
482
483 static void
484 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
485 enum btrace_format format)
486 {
487 const char *errstr;
488 int is_error;
489
490 errstr = _("unknown");
491 is_error = 1;
492
493 switch (format)
494 {
495 default:
496 break;
497
498 case BTRACE_FORMAT_BTS:
499 switch (errcode)
500 {
501 default:
502 break;
503
504 case BDE_BTS_OVERFLOW:
505 errstr = _("instruction overflow");
506 break;
507
508 case BDE_BTS_INSN_SIZE:
509 errstr = _("unknown instruction");
510 break;
511 }
512 break;
513
514 #if defined (HAVE_LIBIPT)
515 case BTRACE_FORMAT_PT:
516 switch (errcode)
517 {
518 case BDE_PT_USER_QUIT:
519 is_error = 0;
520 errstr = _("trace decode cancelled");
521 break;
522
523 case BDE_PT_DISABLED:
524 is_error = 0;
525 errstr = _("disabled");
526 break;
527
528 case BDE_PT_OVERFLOW:
529 is_error = 0;
530 errstr = _("overflow");
531 break;
532
533 default:
534 if (errcode < 0)
535 errstr = pt_errstr (pt_errcode (errcode));
536 break;
537 }
538 break;
539 #endif /* defined (HAVE_LIBIPT) */
540 }
541
542 uiout->text (_("["));
543 if (is_error)
544 {
545 uiout->text (_("decode error ("));
546 uiout->field_int ("errcode", errcode);
547 uiout->text (_("): "));
548 }
549 uiout->text (errstr);
550 uiout->text (_("]\n"));
551 }
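/* For illustration: a BTS instruction overflow, assuming a hypothetical
errcode value of 1, would be rendered as

     [decode error (1): instruction overflow]

while an Intel PT BDE_PT_DISABLED stop has IS_ERROR cleared and prints
just "[disabled]". */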
552
553 /* Print an unsigned int. */
554
555 static void
556 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
557 {
558 uiout->field_fmt (fld, "%u", val);
559 }
560
561 /* A range of source lines. */
562
563 struct btrace_line_range
564 {
565 /* The symtab this line is from. */
566 struct symtab *symtab;
567
568 /* The first line (inclusive). */
569 int begin;
570
571 /* The last line (exclusive). */
572 int end;
573 };
574
575 /* Construct a line range. */
576
577 static struct btrace_line_range
578 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
579 {
580 struct btrace_line_range range;
581
582 range.symtab = symtab;
583 range.begin = begin;
584 range.end = end;
585
586 return range;
587 }
588
589 /* Add a line to a line range. */
590
591 static struct btrace_line_range
592 btrace_line_range_add (struct btrace_line_range range, int line)
593 {
594 if (range.end <= range.begin)
595 {
596 /* This is the first entry. */
597 range.begin = line;
598 range.end = line + 1;
599 }
600 else if (line < range.begin)
601 range.begin = line;
602 else if (range.end <= line)
603 range.end = line + 1;
604
605 return range;
606 }
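/* A worked example: starting from the empty range [0; 0), adding line 42
yields [42; 43); adding line 40 widens it to [40; 43); adding line 45
widens it to [40; 46). The range only tracks the bounds; lines in between
are considered covered. */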
607
608 /* Return non-zero if RANGE is empty, zero otherwise. */
609
610 static int
611 btrace_line_range_is_empty (struct btrace_line_range range)
612 {
613 return range.end <= range.begin;
614 }
615
616 /* Return non-zero if LHS contains RHS, zero otherwise. */
617
618 static int
619 btrace_line_range_contains_range (struct btrace_line_range lhs,
620 struct btrace_line_range rhs)
621 {
622 return ((lhs.symtab == rhs.symtab)
623 && (lhs.begin <= rhs.begin)
624 && (rhs.end <= lhs.end));
625 }
626
627 /* Find the line range associated with PC. */
628
629 static struct btrace_line_range
630 btrace_find_line_range (CORE_ADDR pc)
631 {
632 struct btrace_line_range range;
633 struct linetable_entry *lines;
634 struct linetable *ltable;
635 struct symtab *symtab;
636 int nlines, i;
637
638 symtab = find_pc_line_symtab (pc);
639 if (symtab == NULL)
640 return btrace_mk_line_range (NULL, 0, 0);
641
642 ltable = SYMTAB_LINETABLE (symtab);
643 if (ltable == NULL)
644 return btrace_mk_line_range (symtab, 0, 0);
645
646 nlines = ltable->nitems;
647 lines = ltable->item;
648 if (nlines <= 0)
649 return btrace_mk_line_range (symtab, 0, 0);
650
651 range = btrace_mk_line_range (symtab, 0, 0);
652 for (i = 0; i < nlines; i++)
653 {
654 if ((lines[i].pc == pc) && (lines[i].line != 0))
655 range = btrace_line_range_add (range, lines[i].line);
656 }
657
658 return range;
659 }
660
661 /* Print source lines in LINES to UIOUT.
662
663 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
664 instructions corresponding to that source line. When printing a new source
665 line, we do the cleanups for the open chain and open a new cleanup chain for
666 the new source line. If the source line range in LINES is not empty, this
667 function will leave the cleanup chain for the last printed source line open
668 so instructions can be added to it. */
669
670 static void
671 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
672 struct cleanup **ui_item_chain, int flags)
673 {
674 print_source_lines_flags psl_flags;
675 int line;
676
677 psl_flags = 0;
678 if (flags & DISASSEMBLY_FILENAME)
679 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
680
681 for (line = lines.begin; line < lines.end; ++line)
682 {
683 if (*ui_item_chain != NULL)
684 do_cleanups (*ui_item_chain);
685
686 *ui_item_chain
687 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
688
689 print_source_lines (lines.symtab, line, line + 1, psl_flags);
690
691 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
692 }
693 }
694
695 /* Disassemble a section of the recorded instruction trace. */
696
697 static void
698 btrace_insn_history (struct ui_out *uiout,
699 const struct btrace_thread_info *btinfo,
700 const struct btrace_insn_iterator *begin,
701 const struct btrace_insn_iterator *end, int flags)
702 {
703 struct ui_file *stb;
704 struct cleanup *cleanups, *ui_item_chain;
705 struct gdbarch *gdbarch;
706 struct btrace_insn_iterator it;
707 struct btrace_line_range last_lines;
708
709 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
710 btrace_insn_number (end));
711
712 flags |= DISASSEMBLY_SPECULATIVE;
713
714 gdbarch = target_gdbarch ();
715 stb = mem_fileopen ();
716 cleanups = make_cleanup_ui_file_delete (stb);
717 gdb_disassembler di (gdbarch, stb);
718 last_lines = btrace_mk_line_range (NULL, 0, 0);
719
720 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
721
722 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
723 instructions corresponding to that line. */
724 ui_item_chain = NULL;
725
726 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
727 {
728 const struct btrace_insn *insn;
729
730 insn = btrace_insn_get (&it);
731
732 /* A NULL instruction indicates a gap in the trace. */
733 if (insn == NULL)
734 {
735 const struct btrace_config *conf;
736
737 conf = btrace_conf (btinfo);
738
739 /* We have trace, so we must have a configuration. */
740 gdb_assert (conf != NULL);
741
742 btrace_ui_out_decode_error (uiout, it.function->errcode,
743 conf->format);
744 }
745 else
746 {
747 struct disasm_insn dinsn;
748
749 if ((flags & DISASSEMBLY_SOURCE) != 0)
750 {
751 struct btrace_line_range lines;
752
753 lines = btrace_find_line_range (insn->pc);
754 if (!btrace_line_range_is_empty (lines)
755 && !btrace_line_range_contains_range (last_lines, lines))
756 {
757 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
758 last_lines = lines;
759 }
760 else if (ui_item_chain == NULL)
761 {
762 ui_item_chain
763 = make_cleanup_ui_out_tuple_begin_end (uiout,
764 "src_and_asm_line");
765 /* No source information. */
766 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
767 }
768
769 gdb_assert (ui_item_chain != NULL);
770 }
771
772 memset (&dinsn, 0, sizeof (dinsn));
773 dinsn.number = btrace_insn_number (&it);
774 dinsn.addr = insn->pc;
775
776 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
777 dinsn.is_speculative = 1;
778
779 di.pretty_print_insn (uiout, &dinsn, flags);
780 }
781 }
782
783 do_cleanups (cleanups);
784 }
785
786 /* The to_insn_history method of target record-btrace. */
787
788 static void
789 record_btrace_insn_history (struct target_ops *self, int size, int flags)
790 {
791 struct btrace_thread_info *btinfo;
792 struct btrace_insn_history *history;
793 struct btrace_insn_iterator begin, end;
794 struct cleanup *uiout_cleanup;
795 struct ui_out *uiout;
796 unsigned int context, covered;
797
798 uiout = current_uiout;
799 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
800 "insn history");
801 context = abs (size);
802 if (context == 0)
803 error (_("Bad record instruction-history-size."));
804
805 btinfo = require_btrace ();
806 history = btinfo->insn_history;
807 if (history == NULL)
808 {
809 struct btrace_insn_iterator *replay;
810
811 DEBUG ("insn-history (0x%x): %d", flags, size);
812
813 /* If we're replaying, we start at the replay position. Otherwise, we
814 start at the tail of the trace. */
815 replay = btinfo->replay;
816 if (replay != NULL)
817 begin = *replay;
818 else
819 btrace_insn_end (&begin, btinfo);
820
821 /* We start from here and expand in the requested direction. Then we
822 expand in the other direction, as well, to fill up any remaining
823 context. */
824 end = begin;
825 if (size < 0)
826 {
827 /* We want the current position covered, as well. */
828 covered = btrace_insn_next (&end, 1);
829 covered += btrace_insn_prev (&begin, context - covered);
830 covered += btrace_insn_next (&end, context - covered);
831 }
832 else
833 {
834 covered = btrace_insn_next (&end, context);
835 covered += btrace_insn_prev (&begin, context - covered);
836 }
837 }
838 else
839 {
840 begin = history->begin;
841 end = history->end;
842
843 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
844 btrace_insn_number (&begin), btrace_insn_number (&end));
845
846 if (size < 0)
847 {
848 end = begin;
849 covered = btrace_insn_prev (&begin, context);
850 }
851 else
852 {
853 begin = end;
854 covered = btrace_insn_next (&end, context);
855 }
856 }
857
858 if (covered > 0)
859 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
860 else
861 {
862 if (size < 0)
863 printf_unfiltered (_("At the start of the branch trace record.\n"));
864 else
865 printf_unfiltered (_("At the end of the branch trace record.\n"));
866 }
867
868 btrace_set_insn_history (btinfo, &begin, &end);
869 do_cleanups (uiout_cleanup);
870 }
871
872 /* The to_insn_history_range method of target record-btrace. */
873
874 static void
875 record_btrace_insn_history_range (struct target_ops *self,
876 ULONGEST from, ULONGEST to, int flags)
877 {
878 struct btrace_thread_info *btinfo;
879 struct btrace_insn_history *history;
880 struct btrace_insn_iterator begin, end;
881 struct cleanup *uiout_cleanup;
882 struct ui_out *uiout;
883 unsigned int low, high;
884 int found;
885
886 uiout = current_uiout;
887 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
888 "insn history");
889 low = from;
890 high = to;
891
892 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
893
894 /* Check for wrap-arounds. */
895 if (low != from || high != to)
896 error (_("Bad range."));
897
898 if (high < low)
899 error (_("Bad range."));
900
901 btinfo = require_btrace ();
902
903 found = btrace_find_insn_by_number (&begin, btinfo, low);
904 if (found == 0)
905 error (_("Range out of bounds."));
906
907 found = btrace_find_insn_by_number (&end, btinfo, high);
908 if (found == 0)
909 {
910 /* Silently truncate the range. */
911 btrace_insn_end (&end, btinfo);
912 }
913 else
914 {
915 /* We want both begin and end to be inclusive. */
916 btrace_insn_next (&end, 1);
917 }
918
919 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
920 btrace_set_insn_history (btinfo, &begin, &end);
921
922 do_cleanups (uiout_cleanup);
923 }
924
925 /* The to_insn_history_from method of target record-btrace. */
926
927 static void
928 record_btrace_insn_history_from (struct target_ops *self,
929 ULONGEST from, int size, int flags)
930 {
931 ULONGEST begin, end, context;
932
933 context = abs (size);
934 if (context == 0)
935 error (_("Bad record instruction-history-size."));
936
937 if (size < 0)
938 {
939 end = from;
940
941 if (from < context)
942 begin = 0;
943 else
944 begin = from - context + 1;
945 }
946 else
947 {
948 begin = from;
949 end = from + context - 1;
950
951 /* Check for wrap-around. */
952 if (end < begin)
953 end = ULONGEST_MAX;
954 }
955
956 record_btrace_insn_history_range (self, begin, end, flags);
957 }
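/* A worked example of the arithmetic above: FROM == 10 with SIZE == -5
gives CONTEXT == 5, END == 10 and BEGIN == 10 - 5 + 1 == 6, i.e. the five
instructions 6..10, inclusive on both ends. For FROM == 2 with
SIZE == -5, BEGIN clamps to 0. */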
958
959 /* Print the instruction number range for a function call history line. */
960
961 static void
962 btrace_call_history_insn_range (struct ui_out *uiout,
963 const struct btrace_function *bfun)
964 {
965 unsigned int begin, end, size;
966
967 size = VEC_length (btrace_insn_s, bfun->insn);
968 gdb_assert (size > 0);
969
970 begin = bfun->insn_offset;
971 end = begin + size - 1;
972
973 ui_out_field_uint (uiout, "insn begin", begin);
974 uiout->text (",");
975 ui_out_field_uint (uiout, "insn end", end);
976 }
977
978 /* Compute the lowest and highest source line for the instructions in BFUN
979 and return them in PBEGIN and PEND.
980 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
981 result from inlining or macro expansion. */
982
983 static void
984 btrace_compute_src_line_range (const struct btrace_function *bfun,
985 int *pbegin, int *pend)
986 {
987 struct btrace_insn *insn;
988 struct symtab *symtab;
989 struct symbol *sym;
990 unsigned int idx;
991 int begin, end;
992
993 begin = INT_MAX;
994 end = INT_MIN;
995
996 sym = bfun->sym;
997 if (sym == NULL)
998 goto out;
999
1000 symtab = symbol_symtab (sym);
1001
1002 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
1003 {
1004 struct symtab_and_line sal;
1005
1006 sal = find_pc_line (insn->pc, 0);
1007 if (sal.symtab != symtab || sal.line == 0)
1008 continue;
1009
1010 begin = std::min (begin, sal.line);
1011 end = std::max (end, sal.line);
1012 }
1013
1014 out:
1015 *pbegin = begin;
1016 *pend = end;
1017 }
1018
1019 /* Print the source line information for a function call history line. */
1020
1021 static void
1022 btrace_call_history_src_line (struct ui_out *uiout,
1023 const struct btrace_function *bfun)
1024 {
1025 struct symbol *sym;
1026 int begin, end;
1027
1028 sym = bfun->sym;
1029 if (sym == NULL)
1030 return;
1031
1032 uiout->field_string ("file",
1033 symtab_to_filename_for_display (symbol_symtab (sym)));
1034
1035 btrace_compute_src_line_range (bfun, &begin, &end);
1036 if (end < begin)
1037 return;
1038
1039 uiout->text (":");
1040 uiout->field_int ("min line", begin);
1041
1042 if (end == begin)
1043 return;
1044
1045 uiout->text (",");
1046 uiout->field_int ("max line", end);
1047 }
1048
1049 /* Get the name of a branch trace function. */
1050
1051 static const char *
1052 btrace_get_bfun_name (const struct btrace_function *bfun)
1053 {
1054 struct minimal_symbol *msym;
1055 struct symbol *sym;
1056
1057 if (bfun == NULL)
1058 return "??";
1059
1060 msym = bfun->msym;
1061 sym = bfun->sym;
1062
1063 if (sym != NULL)
1064 return SYMBOL_PRINT_NAME (sym);
1065 else if (msym != NULL)
1066 return MSYMBOL_PRINT_NAME (msym);
1067 else
1068 return "??";
1069 }
1070
1071 /* Print a section of the recorded function call trace. */
1072
1073 static void
1074 btrace_call_history (struct ui_out *uiout,
1075 const struct btrace_thread_info *btinfo,
1076 const struct btrace_call_iterator *begin,
1077 const struct btrace_call_iterator *end,
1078 int int_flags)
1079 {
1080 struct btrace_call_iterator it;
1081 record_print_flags flags = (enum record_print_flag) int_flags;
1082
1083 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1084 btrace_call_number (end));
1085
1086 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1087 {
1088 const struct btrace_function *bfun;
1089 struct minimal_symbol *msym;
1090 struct symbol *sym;
1091
1092 bfun = btrace_call_get (&it);
1093 sym = bfun->sym;
1094 msym = bfun->msym;
1095
1096 /* Print the function index. */
1097 ui_out_field_uint (uiout, "index", bfun->number);
1098 uiout->text ("\t");
1099
1100 /* Indicate gaps in the trace. */
1101 if (bfun->errcode != 0)
1102 {
1103 const struct btrace_config *conf;
1104
1105 conf = btrace_conf (btinfo);
1106
1107 /* We have trace, so we must have a configuration. */
1108 gdb_assert (conf != NULL);
1109
1110 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1111
1112 continue;
1113 }
1114
1115 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1116 {
1117 int level = bfun->level + btinfo->level, i;
1118
1119 for (i = 0; i < level; ++i)
1120 uiout->text (" ");
1121 }
1122
1123 if (sym != NULL)
1124 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1125 else if (msym != NULL)
1126 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1127 else if (!uiout->is_mi_like_p ())
1128 uiout->field_string ("function", "??");
1129
1130 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1131 {
1132 uiout->text (_("\tinst "));
1133 btrace_call_history_insn_range (uiout, bfun);
1134 }
1135
1136 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1137 {
1138 uiout->text (_("\tat "));
1139 btrace_call_history_src_line (uiout, bfun);
1140 }
1141
1142 uiout->text ("\n");
1143 }
1144 }
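/* A hypothetical line of output from the loop above, with all print
flags set (fields are tab-separated):

     13    to_uppercase    inst 42,47    at util.c:12,14

i.e. function index, optional call-level indentation, function name,
instruction number range, and source line range. */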
1145
1146 /* The to_call_history method of target record-btrace. */
1147
1148 static void
1149 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1150 {
1151 struct btrace_thread_info *btinfo;
1152 struct btrace_call_history *history;
1153 struct btrace_call_iterator begin, end;
1154 struct cleanup *uiout_cleanup;
1155 struct ui_out *uiout;
1156 unsigned int context, covered;
1157 record_print_flags flags = (enum record_print_flag) int_flags;
1158
1159 uiout = current_uiout;
1160 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1161 "insn history");
1162 context = abs (size);
1163 if (context == 0)
1164 error (_("Bad record function-call-history-size."));
1165
1166 btinfo = require_btrace ();
1167 history = btinfo->call_history;
1168 if (history == NULL)
1169 {
1170 struct btrace_insn_iterator *replay;
1171
1172 DEBUG ("call-history (0x%x): %d", int_flags, size);
1173
1174 /* If we're replaying, we start at the replay position. Otherwise, we
1175 start at the tail of the trace. */
1176 replay = btinfo->replay;
1177 if (replay != NULL)
1178 {
1179 begin.function = replay->function;
1180 begin.btinfo = btinfo;
1181 }
1182 else
1183 btrace_call_end (&begin, btinfo);
1184
1185 /* We start from here and expand in the requested direction. Then we
1186 expand in the other direction, as well, to fill up any remaining
1187 context. */
1188 end = begin;
1189 if (size < 0)
1190 {
1191 /* We want the current position covered, as well. */
1192 covered = btrace_call_next (&end, 1);
1193 covered += btrace_call_prev (&begin, context - covered);
1194 covered += btrace_call_next (&end, context - covered);
1195 }
1196 else
1197 {
1198 covered = btrace_call_next (&end, context);
1199 covered += btrace_call_prev (&begin, context - covered);
1200 }
1201 }
1202 else
1203 {
1204 begin = history->begin;
1205 end = history->end;
1206
1207 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1208 btrace_call_number (&begin), btrace_call_number (&end));
1209
1210 if (size < 0)
1211 {
1212 end = begin;
1213 covered = btrace_call_prev (&begin, context);
1214 }
1215 else
1216 {
1217 begin = end;
1218 covered = btrace_call_next (&end, context);
1219 }
1220 }
1221
1222 if (covered > 0)
1223 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1224 else
1225 {
1226 if (size < 0)
1227 printf_unfiltered (_("At the start of the branch trace record.\n"));
1228 else
1229 printf_unfiltered (_("At the end of the branch trace record.\n"));
1230 }
1231
1232 btrace_set_call_history (btinfo, &begin, &end);
1233 do_cleanups (uiout_cleanup);
1234 }
1235
1236 /* The to_call_history_range method of target record-btrace. */
1237
1238 static void
1239 record_btrace_call_history_range (struct target_ops *self,
1240 ULONGEST from, ULONGEST to,
1241 int int_flags)
1242 {
1243 struct btrace_thread_info *btinfo;
1244 struct btrace_call_history *history;
1245 struct btrace_call_iterator begin, end;
1246 struct cleanup *uiout_cleanup;
1247 struct ui_out *uiout;
1248 unsigned int low, high;
1249 int found;
1250 record_print_flags flags = (enum record_print_flag) int_flags;
1251
1252 uiout = current_uiout;
1253 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1254 "func history");
1255 low = from;
1256 high = to;
1257
1258 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1259
1260 /* Check for wrap-arounds. */
1261 if (low != from || high != to)
1262 error (_("Bad range."));
1263
1264 if (high < low)
1265 error (_("Bad range."));
1266
1267 btinfo = require_btrace ();
1268
1269 found = btrace_find_call_by_number (&begin, btinfo, low);
1270 if (found == 0)
1271 error (_("Range out of bounds."));
1272
1273 found = btrace_find_call_by_number (&end, btinfo, high);
1274 if (found == 0)
1275 {
1276 /* Silently truncate the range. */
1277 btrace_call_end (&end, btinfo);
1278 }
1279 else
1280 {
1281 /* We want both begin and end to be inclusive. */
1282 btrace_call_next (&end, 1);
1283 }
1284
1285 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1286 btrace_set_call_history (btinfo, &begin, &end);
1287
1288 do_cleanups (uiout_cleanup);
1289 }
1290
1291 /* The to_call_history_from method of target record-btrace. */
1292
1293 static void
1294 record_btrace_call_history_from (struct target_ops *self,
1295 ULONGEST from, int size,
1296 int int_flags)
1297 {
1298 ULONGEST begin, end, context;
1299 record_print_flags flags = (enum record_print_flag) int_flags;
1300
1301 context = abs (size);
1302 if (context == 0)
1303 error (_("Bad record function-call-history-size."));
1304
1305 if (size < 0)
1306 {
1307 end = from;
1308
1309 if (from < context)
1310 begin = 0;
1311 else
1312 begin = from - context + 1;
1313 }
1314 else
1315 {
1316 begin = from;
1317 end = from + context - 1;
1318
1319 /* Check for wrap-around. */
1320 if (end < begin)
1321 end = ULONGEST_MAX;
1322 }
1323
1324 record_btrace_call_history_range (self, begin, end, flags);
1325 }
1326
1327 /* The to_record_is_replaying method of target record-btrace. */
1328
1329 static int
1330 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1331 {
1332 struct thread_info *tp;
1333
1334 ALL_NON_EXITED_THREADS (tp)
1335 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1336 return 1;
1337
1338 return 0;
1339 }
1340
1341 /* The to_record_will_replay method of target record-btrace. */
1342
1343 static int
1344 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1345 {
1346 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1347 }
1348
1349 /* The to_xfer_partial method of target record-btrace. */
1350
1351 static enum target_xfer_status
1352 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1353 const char *annex, gdb_byte *readbuf,
1354 const gdb_byte *writebuf, ULONGEST offset,
1355 ULONGEST len, ULONGEST *xfered_len)
1356 {
1358
1359 /* Filter out requests that don't make sense during replay. */
1360 if (replay_memory_access == replay_memory_access_read_only
1361 && !record_btrace_generating_corefile
1362 && record_btrace_is_replaying (ops, inferior_ptid))
1363 {
1364 switch (object)
1365 {
1366 case TARGET_OBJECT_MEMORY:
1367 {
1368 struct target_section *section;
1369
1370 /* We do not allow writing memory in general. */
1371 if (writebuf != NULL)
1372 {
1373 *xfered_len = len;
1374 return TARGET_XFER_UNAVAILABLE;
1375 }
1376
1377 /* We allow reading readonly memory. */
1378 section = target_section_by_addr (ops, offset);
1379 if (section != NULL)
1380 {
1381 /* Check if the section we found is readonly. */
1382 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1383 section->the_bfd_section)
1384 & SEC_READONLY) != 0)
1385 {
1386 /* Truncate the request to fit into this section. */
1387 len = std::min (len, section->endaddr - offset);
1388 break;
1389 }
1390 }
1391
1392 *xfered_len = len;
1393 return TARGET_XFER_UNAVAILABLE;
1394 }
1395 }
1396 }
1397
1398 /* Forward the request. */
1399 ops = ops->beneath;
1400 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1401 offset, len, xfered_len);
1402 }
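/* To illustrate the filter above: while replaying under the default
"set record btrace replay-memory-access read-only", reads from read-only
sections (e.g. .text) are forwarded to the target beneath, whereas writes
and reads of writable memory report TARGET_XFER_UNAVAILABLE. Setting
replay-memory-access to "read-write" bypasses the filter entirely. */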
1403
1404 /* The to_insert_breakpoint method of target record-btrace. */
1405
1406 static int
1407 record_btrace_insert_breakpoint (struct target_ops *ops,
1408 struct gdbarch *gdbarch,
1409 struct bp_target_info *bp_tgt)
1410 {
1411 const char *old;
1412 int ret;
1413
1414 /* Inserting breakpoints requires accessing memory. Allow it for the
1415 duration of this function. */
1416 old = replay_memory_access;
1417 replay_memory_access = replay_memory_access_read_write;
1418
1419 ret = 0;
1420 TRY
1421 {
1422 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1423 }
1424 CATCH (except, RETURN_MASK_ALL)
1425 {
1426 replay_memory_access = old;
1427 throw_exception (except);
1428 }
1429 END_CATCH
1430 replay_memory_access = old;
1431
1432 return ret;
1433 }
1434
1435 /* The to_remove_breakpoint method of target record-btrace. */
1436
1437 static int
1438 record_btrace_remove_breakpoint (struct target_ops *ops,
1439 struct gdbarch *gdbarch,
1440 struct bp_target_info *bp_tgt,
1441 enum remove_bp_reason reason)
1442 {
1443 const char *old;
1444 int ret;
1445
1446 /* Removing breakpoints requires accessing memory. Allow it for the
1447 duration of this function. */
1448 old = replay_memory_access;
1449 replay_memory_access = replay_memory_access_read_write;
1450
1451 ret = 0;
1452 TRY
1453 {
1454 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1455 reason);
1456 }
1457 CATCH (except, RETURN_MASK_ALL)
1458 {
1459 replay_memory_access = old;
1460 throw_exception (except);
1461 }
1462 END_CATCH
1463 replay_memory_access = old;
1464
1465 return ret;
1466 }
1467
1468 /* The to_fetch_registers method of target record-btrace. */
1469
1470 static void
1471 record_btrace_fetch_registers (struct target_ops *ops,
1472 struct regcache *regcache, int regno)
1473 {
1474 struct btrace_insn_iterator *replay;
1475 struct thread_info *tp;
1476
1477 tp = find_thread_ptid (inferior_ptid);
1478 gdb_assert (tp != NULL);
1479
1480 replay = tp->btrace.replay;
1481 if (replay != NULL && !record_btrace_generating_corefile)
1482 {
1483 const struct btrace_insn *insn;
1484 struct gdbarch *gdbarch;
1485 int pcreg;
1486
1487 gdbarch = get_regcache_arch (regcache);
1488 pcreg = gdbarch_pc_regnum (gdbarch);
1489 if (pcreg < 0)
1490 return;
1491
1492 /* We can only provide the PC register. */
1493 if (regno >= 0 && regno != pcreg)
1494 return;
1495
1496 insn = btrace_insn_get (replay);
1497 gdb_assert (insn != NULL);
1498
1499 regcache_raw_supply (regcache, regno, &insn->pc);
1500 }
1501 else
1502 {
1503 struct target_ops *t = ops->beneath;
1504
1505 t->to_fetch_registers (t, regcache, regno);
1506 }
1507 }
1508
1509 /* The to_store_registers method of target record-btrace. */
1510
1511 static void
1512 record_btrace_store_registers (struct target_ops *ops,
1513 struct regcache *regcache, int regno)
1514 {
1515 struct target_ops *t;
1516
1517 if (!record_btrace_generating_corefile
1518 && record_btrace_is_replaying (ops, inferior_ptid))
1519 error (_("Cannot write registers while replaying."));
1520
1521 gdb_assert (may_write_registers != 0);
1522
1523 t = ops->beneath;
1524 t->to_store_registers (t, regcache, regno);
1525 }
1526
1527 /* The to_prepare_to_store method of target record-btrace. */
1528
1529 static void
1530 record_btrace_prepare_to_store (struct target_ops *ops,
1531 struct regcache *regcache)
1532 {
1533 struct target_ops *t;
1534
1535 if (!record_btrace_generating_corefile
1536 && record_btrace_is_replaying (ops, inferior_ptid))
1537 return;
1538
1539 t = ops->beneath;
1540 t->to_prepare_to_store (t, regcache);
1541 }
1542
1543 /* The branch trace frame cache. */
1544
1545 struct btrace_frame_cache
1546 {
1547 /* The thread. */
1548 struct thread_info *tp;
1549
1550 /* The frame info. */
1551 struct frame_info *frame;
1552
1553 /* The branch trace function segment. */
1554 const struct btrace_function *bfun;
1555 };
1556
1557 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1558
1559 static htab_t bfcache;
1560
1561 /* hash_f for htab_create_alloc of bfcache. */
1562
1563 static hashval_t
1564 bfcache_hash (const void *arg)
1565 {
1566 const struct btrace_frame_cache *cache
1567 = (const struct btrace_frame_cache *) arg;
1568
1569 return htab_hash_pointer (cache->frame);
1570 }
1571
1572 /* eq_f for htab_create_alloc of bfcache. */
1573
1574 static int
1575 bfcache_eq (const void *arg1, const void *arg2)
1576 {
1577 const struct btrace_frame_cache *cache1
1578 = (const struct btrace_frame_cache *) arg1;
1579 const struct btrace_frame_cache *cache2
1580 = (const struct btrace_frame_cache *) arg2;
1581
1582 return cache1->frame == cache2->frame;
1583 }
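/* A minimal creation sketch for the table, assuming libiberty's
htab_create_alloc signature (the actual call lives outside this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                                  xcalloc, xfree); */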
1584
1585 /* Create a new btrace frame cache. */
1586
1587 static struct btrace_frame_cache *
1588 bfcache_new (struct frame_info *frame)
1589 {
1590 struct btrace_frame_cache *cache;
1591 void **slot;
1592
1593 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1594 cache->frame = frame;
1595
1596 slot = htab_find_slot (bfcache, cache, INSERT);
1597 gdb_assert (*slot == NULL);
1598 *slot = cache;
1599
1600 return cache;
1601 }
1602
1603 /* Extract the branch trace function from a branch trace frame. */
1604
1605 static const struct btrace_function *
1606 btrace_get_frame_function (struct frame_info *frame)
1607 {
1608 const struct btrace_frame_cache *cache;
1609 const struct btrace_function *bfun;
1610 struct btrace_frame_cache pattern;
1611 void **slot;
1612
1613 pattern.frame = frame;
1614
1615 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1616 if (slot == NULL)
1617 return NULL;
1618
1619 cache = (const struct btrace_frame_cache *) *slot;
1620 return cache->bfun;
1621 }
1622
1623 /* Implement stop_reason method for record_btrace_frame_unwind. */
1624
1625 static enum unwind_stop_reason
1626 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1627 void **this_cache)
1628 {
1629 const struct btrace_frame_cache *cache;
1630 const struct btrace_function *bfun;
1631
1632 cache = (const struct btrace_frame_cache *) *this_cache;
1633 bfun = cache->bfun;
1634 gdb_assert (bfun != NULL);
1635
1636 if (bfun->up == NULL)
1637 return UNWIND_UNAVAILABLE;
1638
1639 return UNWIND_NO_REASON;
1640 }
1641
1642 /* Implement this_id method for record_btrace_frame_unwind. */
1643
1644 static void
1645 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1646 struct frame_id *this_id)
1647 {
1648 const struct btrace_frame_cache *cache;
1649 const struct btrace_function *bfun;
1650 CORE_ADDR code, special;
1651
1652 cache = (const struct btrace_frame_cache *) *this_cache;
1653
1654 bfun = cache->bfun;
1655 gdb_assert (bfun != NULL);
1656
1657 while (bfun->segment.prev != NULL)
1658 bfun = bfun->segment.prev;
1659
1660 code = get_frame_func (this_frame);
1661 special = bfun->number;
1662
1663 *this_id = frame_id_build_unavailable_stack_special (code, special);
1664
1665 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1666 btrace_get_bfun_name (cache->bfun),
1667 core_addr_to_string_nz (this_id->code_addr),
1668 core_addr_to_string_nz (this_id->special_addr));
1669 }
1670
1671 /* Implement prev_register method for record_btrace_frame_unwind. */
1672
1673 static struct value *
1674 record_btrace_frame_prev_register (struct frame_info *this_frame,
1675 void **this_cache,
1676 int regnum)
1677 {
1678 const struct btrace_frame_cache *cache;
1679 const struct btrace_function *bfun, *caller;
1680 const struct btrace_insn *insn;
1681 struct gdbarch *gdbarch;
1682 CORE_ADDR pc;
1683 int pcreg;
1684
1685 gdbarch = get_frame_arch (this_frame);
1686 pcreg = gdbarch_pc_regnum (gdbarch);
1687 if (pcreg < 0 || regnum != pcreg)
1688 throw_error (NOT_AVAILABLE_ERROR,
1689 _("Registers are not available in btrace record history"));
1690
1691 cache = (const struct btrace_frame_cache *) *this_cache;
1692 bfun = cache->bfun;
1693 gdb_assert (bfun != NULL);
1694
1695 caller = bfun->up;
1696 if (caller == NULL)
1697 throw_error (NOT_AVAILABLE_ERROR,
1698 _("No caller in btrace record history"));
1699
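/* If the up link was created from a return (BFUN_UP_LINKS_TO_RET), the
caller segment starts at the return address, so the unwound PC is its
first instruction. Otherwise the up link reflects a call, and the unwound
PC is the instruction after the call, i.e. the last instruction of the
caller segment plus its length. */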
1700 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1701 {
1702 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1703 pc = insn->pc;
1704 }
1705 else
1706 {
1707 insn = VEC_last (btrace_insn_s, caller->insn);
1708 pc = insn->pc;
1709
1710 pc += gdb_insn_length (gdbarch, pc);
1711 }
1712
1713 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1714 btrace_get_bfun_name (bfun), bfun->level,
1715 core_addr_to_string_nz (pc));
1716
1717 return frame_unwind_got_address (this_frame, regnum, pc);
1718 }
1719
1720 /* Implement sniffer method for record_btrace_frame_unwind. */
1721
1722 static int
1723 record_btrace_frame_sniffer (const struct frame_unwind *self,
1724 struct frame_info *this_frame,
1725 void **this_cache)
1726 {
1727 const struct btrace_function *bfun;
1728 struct btrace_frame_cache *cache;
1729 struct thread_info *tp;
1730 struct frame_info *next;
1731
1732 /* THIS_FRAME does not contain a reference to its thread. */
1733 tp = find_thread_ptid (inferior_ptid);
1734 gdb_assert (tp != NULL);
1735
1736 bfun = NULL;
1737 next = get_next_frame (this_frame);
1738 if (next == NULL)
1739 {
1740 const struct btrace_insn_iterator *replay;
1741
1742 replay = tp->btrace.replay;
1743 if (replay != NULL)
1744 bfun = replay->function;
1745 }
1746 else
1747 {
1748 const struct btrace_function *callee;
1749
1750 callee = btrace_get_frame_function (next);
1751 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1752 bfun = callee->up;
1753 }
1754
1755 if (bfun == NULL)
1756 return 0;
1757
1758 DEBUG ("[frame] sniffed frame for %s on level %d",
1759 btrace_get_bfun_name (bfun), bfun->level);
1760
1761 /* This is our frame. Initialize the frame cache. */
1762 cache = bfcache_new (this_frame);
1763 cache->tp = tp;
1764 cache->bfun = bfun;
1765
1766 *this_cache = cache;
1767 return 1;
1768 }
1769
1770 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1771
1772 static int
1773 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1774 struct frame_info *this_frame,
1775 void **this_cache)
1776 {
1777 const struct btrace_function *bfun, *callee;
1778 struct btrace_frame_cache *cache;
1779 struct frame_info *next;
1780
1781 next = get_next_frame (this_frame);
1782 if (next == NULL)
1783 return 0;
1784
1785 callee = btrace_get_frame_function (next);
1786 if (callee == NULL)
1787 return 0;
1788
1789 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1790 return 0;
1791
1792 bfun = callee->up;
1793 if (bfun == NULL)
1794 return 0;
1795
1796 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1798
1799 /* This is our frame. Initialize the frame cache. */
1800 cache = bfcache_new (this_frame);
1801 cache->tp = find_thread_ptid (inferior_ptid);
1802 cache->bfun = bfun;
1803
1804 *this_cache = cache;
1805 return 1;
1806 }
1807
1808 static void
1809 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1810 {
1811 struct btrace_frame_cache *cache;
1812 void **slot;
1813
1814 cache = (struct btrace_frame_cache *) this_cache;
1815
1816 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1817 gdb_assert (slot != NULL);
1818
1819 htab_remove_elt (bfcache, cache);
1820 }
1821
1822 /* btrace recording does not store previous memory content, nor previous
1823 stack frame content. Any unwinding would return erroneous results as the
1824 stack contents no longer match the changed PC value restored from history.
1825 Therefore this unwinder reports any possibly unwound registers as
1826 <unavailable>. */
1827
1828 const struct frame_unwind record_btrace_frame_unwind =
1829 {
1830 NORMAL_FRAME,
1831 record_btrace_frame_unwind_stop_reason,
1832 record_btrace_frame_this_id,
1833 record_btrace_frame_prev_register,
1834 NULL,
1835 record_btrace_frame_sniffer,
1836 record_btrace_frame_dealloc_cache
1837 };
1838
1839 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1840 {
1841 TAILCALL_FRAME,
1842 record_btrace_frame_unwind_stop_reason,
1843 record_btrace_frame_this_id,
1844 record_btrace_frame_prev_register,
1845 NULL,
1846 record_btrace_tailcall_frame_sniffer,
1847 record_btrace_frame_dealloc_cache
1848 };
1849
1850 /* Implement the to_get_unwinder method. */
1851
1852 static const struct frame_unwind *
1853 record_btrace_to_get_unwinder (struct target_ops *self)
1854 {
1855 return &record_btrace_frame_unwind;
1856 }
1857
1858 /* Implement the to_get_tailcall_unwinder method. */
1859
1860 static const struct frame_unwind *
1861 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1862 {
1863 return &record_btrace_tailcall_frame_unwind;
1864 }
1865
1866 /* Return a human-readable string for FLAG. */
1867
1868 static const char *
1869 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1870 {
1871 switch (flag)
1872 {
1873 case BTHR_STEP:
1874 return "step";
1875
1876 case BTHR_RSTEP:
1877 return "reverse-step";
1878
1879 case BTHR_CONT:
1880 return "cont";
1881
1882 case BTHR_RCONT:
1883 return "reverse-cont";
1884
1885 case BTHR_STOP:
1886 return "stop";
1887 }
1888
1889 return "<invalid>";
1890 }
1891
1892 /* Indicate that TP should be resumed according to FLAG. */
1893
1894 static void
1895 record_btrace_resume_thread (struct thread_info *tp,
1896 enum btrace_thread_flag flag)
1897 {
1898 struct btrace_thread_info *btinfo;
1899
1900 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1901 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1902
1903 btinfo = &tp->btrace;
1904
1905 /* Fetch the latest branch trace. */
1906 btrace_fetch (tp);
1907
1908 /* A resume request overwrites a preceding resume or stop request. */
1909 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1910 btinfo->flags |= flag;
1911 }
1912
1913 /* Get the current frame for TP. */
1914
1915 static struct frame_info *
1916 get_thread_current_frame (struct thread_info *tp)
1917 {
1918 struct frame_info *frame;
1919 ptid_t old_inferior_ptid;
1920 int executing;
1921
1922 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1923 old_inferior_ptid = inferior_ptid;
1924 inferior_ptid = tp->ptid;
1925
1926 /* Clear the executing flag to allow changes to the current frame.
1927 We are not actually running, yet. We just started a reverse execution
1928 command or a record goto command.
1929 For the latter, EXECUTING is false and this has no effect.
1930 For the former, EXECUTING is true and we're in to_wait, about to
1931 move the thread. Since we need to recompute the stack, we temporarily
1932 set EXECUTING to false. */
1933 executing = is_executing (inferior_ptid);
1934 set_executing (inferior_ptid, 0);
1935
1936 frame = NULL;
1937 TRY
1938 {
1939 frame = get_current_frame ();
1940 }
1941 CATCH (except, RETURN_MASK_ALL)
1942 {
1943 /* Restore the previous execution state. */
1944 set_executing (inferior_ptid, executing);
1945
1946 /* Restore the previous inferior_ptid. */
1947 inferior_ptid = old_inferior_ptid;
1948
1949 throw_exception (except);
1950 }
1951 END_CATCH
1952
1953 /* Restore the previous execution state. */
1954 set_executing (inferior_ptid, executing);
1955
1956 /* Restore the previous inferior_ptid. */
1957 inferior_ptid = old_inferior_ptid;
1958
1959 return frame;
1960 }
1961
1962 /* Start replaying a thread. */
1963
1964 static struct btrace_insn_iterator *
1965 record_btrace_start_replaying (struct thread_info *tp)
1966 {
1967 struct btrace_insn_iterator *replay;
1968 struct btrace_thread_info *btinfo;
1969
1970 btinfo = &tp->btrace;
1971 replay = NULL;
1972
1973 /* We can't start replaying without trace. */
1974 if (btinfo->begin == NULL)
1975 return NULL;
1976
1977 /* GDB stores the current frame_id when stepping in order to detect steps
1978 into subroutines.
1979 Since frames are computed differently when we're replaying, we need to
1980 recompute those stored frames and fix them up so we can still detect
1981 subroutines after we started replaying. */
1982 TRY
1983 {
1984 struct frame_info *frame;
1985 struct frame_id frame_id;
1986 int upd_step_frame_id, upd_step_stack_frame_id;
1987
1988 /* The current frame without replaying - computed via normal unwind. */
1989 frame = get_thread_current_frame (tp);
1990 frame_id = get_frame_id (frame);
1991
1992 /* Check if we need to update any stepping-related frame id's. */
1993 upd_step_frame_id = frame_id_eq (frame_id,
1994 tp->control.step_frame_id);
1995 upd_step_stack_frame_id = frame_id_eq (frame_id,
1996 tp->control.step_stack_frame_id);
1997
1998 /* We start replaying at the end of the branch trace. This corresponds
1999 to the current instruction. */
2000 replay = XNEW (struct btrace_insn_iterator);
2001 btrace_insn_end (replay, btinfo);
2002
2003 /* Skip gaps at the end of the trace. */
2004 while (btrace_insn_get (replay) == NULL)
2005 {
2006 unsigned int steps;
2007
2008 steps = btrace_insn_prev (replay, 1);
2009 if (steps == 0)
2010 error (_("No trace."));
2011 }
2012
2013 /* We're not replaying, yet. */
2014 gdb_assert (btinfo->replay == NULL);
2015 btinfo->replay = replay;
2016
2017 /* Make sure we're not using any stale registers. */
2018 registers_changed_ptid (tp->ptid);
2019
2020 /* The current frame with replaying - computed via btrace unwind. */
2021 frame = get_thread_current_frame (tp);
2022 frame_id = get_frame_id (frame);
2023
2024 /* Replace stepping related frames where necessary. */
2025 if (upd_step_frame_id)
2026 tp->control.step_frame_id = frame_id;
2027 if (upd_step_stack_frame_id)
2028 tp->control.step_stack_frame_id = frame_id;
2029 }
2030 CATCH (except, RETURN_MASK_ALL)
2031 {
2032 xfree (btinfo->replay);
2033 btinfo->replay = NULL;
2034
2035 registers_changed_ptid (tp->ptid);
2036
2037 throw_exception (except);
2038 }
2039 END_CATCH
2040
2041 return replay;
2042 }
2043
2044 /* Stop replaying a thread. */
2045
2046 static void
2047 record_btrace_stop_replaying (struct thread_info *tp)
2048 {
2049 struct btrace_thread_info *btinfo;
2050
2051 btinfo = &tp->btrace;
2052
2053 xfree (btinfo->replay);
2054 btinfo->replay = NULL;
2055
2056 /* Make sure we're not leaving any stale registers. */
2057 registers_changed_ptid (tp->ptid);
2058 }
2059
2060 /* Stop replaying TP if it is at the end of its execution history. */
2061
2062 static void
2063 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2064 {
2065 struct btrace_insn_iterator *replay, end;
2066 struct btrace_thread_info *btinfo;
2067
2068 btinfo = &tp->btrace;
2069 replay = btinfo->replay;
2070
2071 if (replay == NULL)
2072 return;
2073
2074 btrace_insn_end (&end, btinfo);
2075
2076 if (btrace_insn_cmp (replay, &end) == 0)
2077 record_btrace_stop_replaying (tp);
2078 }
2079
2080 /* The to_resume method of target record-btrace. */
2081
2082 static void
2083 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2084 enum gdb_signal signal)
2085 {
2086 struct thread_info *tp;
2087 enum btrace_thread_flag flag, cflag;
2088
2089 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2090 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2091 step ? "step" : "cont");
2092
2093 /* Store the execution direction of the last resume.
2094
2095 If there is more than one to_resume call, we have to rely on infrun
2096 to not change the execution direction in-between. */
2097 record_btrace_resume_exec_dir = execution_direction;
2098
2099 /* As long as we're not replaying, just forward the request.
2100
2101 For non-stop targets this means that no thread is replaying. In order to
2102 make progress, we may need to explicitly move replaying threads to the end
2103 of their execution history. */
2104 if ((execution_direction != EXEC_REVERSE)
2105 && !record_btrace_is_replaying (ops, minus_one_ptid))
2106 {
2107 ops = ops->beneath;
2108 ops->to_resume (ops, ptid, step, signal);
2109 return;
2110 }
2111
2112 /* Compute the btrace thread flag for the requested move. */
2113 if (execution_direction == EXEC_REVERSE)
2114 {
2115 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2116 cflag = BTHR_RCONT;
2117 }
2118 else
2119 {
2120 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2121 cflag = BTHR_CONT;
2122 }
2123
2124 /* We just indicate the resume intent here. The actual stepping happens in
2125 record_btrace_wait below.
2126
2127 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2128 if (!target_is_non_stop_p ())
2129 {
2130 gdb_assert (ptid_match (inferior_ptid, ptid));
2131
2132 ALL_NON_EXITED_THREADS (tp)
2133 if (ptid_match (tp->ptid, ptid))
2134 {
2135 if (ptid_match (tp->ptid, inferior_ptid))
2136 record_btrace_resume_thread (tp, flag);
2137 else
2138 record_btrace_resume_thread (tp, cflag);
2139 }
2140 }
2141 else
2142 {
2143 ALL_NON_EXITED_THREADS (tp)
2144 if (ptid_match (tp->ptid, ptid))
2145 record_btrace_resume_thread (tp, flag);
2146 }
2147
2148 /* Async support. */
2149 if (target_can_async_p ())
2150 {
2151 target_async (1);
2152 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2153 }
2154 }
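/* A reading aid for the flag computation above (annotation, not part of the
   original file): the requested move maps onto btrace_thread_flag as

     direction  step         continue
     forward    BTHR_STEP    BTHR_CONT
     reverse    BTHR_RSTEP   BTHR_RCONT

   and for all-stop targets only INFERIOR_PTID receives the requested FLAG;
   all other matching threads receive the continue variant CFLAG.  */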
2155
2156 /* The to_commit_resume method of target record-btrace. */
2157
2158 static void
2159 record_btrace_commit_resume (struct target_ops *ops)
2160 {
2161 if ((execution_direction != EXEC_REVERSE)
2162 && !record_btrace_is_replaying (ops, minus_one_ptid))
2163 ops->beneath->to_commit_resume (ops->beneath);
2164 }
2165
2166 /* Cancel resuming TP. */
2167
2168 static void
2169 record_btrace_cancel_resume (struct thread_info *tp)
2170 {
2171 enum btrace_thread_flag flags;
2172
2173 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2174 if (flags == 0)
2175 return;
2176
2177 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2178 print_thread_id (tp),
2179 target_pid_to_str (tp->ptid), flags,
2180 btrace_thread_flag_to_str (flags));
2181
2182 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2183 record_btrace_stop_replaying_at_end (tp);
2184 }
2185
2186 /* Return a target_waitstatus indicating that we ran out of history. */
2187
2188 static struct target_waitstatus
2189 btrace_step_no_history (void)
2190 {
2191 struct target_waitstatus status;
2192
2193 status.kind = TARGET_WAITKIND_NO_HISTORY;
2194
2195 return status;
2196 }
2197
2198 /* Return a target_waitstatus indicating that a step finished. */
2199
2200 static struct target_waitstatus
2201 btrace_step_stopped (void)
2202 {
2203 struct target_waitstatus status;
2204
2205 status.kind = TARGET_WAITKIND_STOPPED;
2206 status.value.sig = GDB_SIGNAL_TRAP;
2207
2208 return status;
2209 }
2210
2211 /* Return a target_waitstatus indicating that a thread was stopped as
2212 requested. */
2213
2214 static struct target_waitstatus
2215 btrace_step_stopped_on_request (void)
2216 {
2217 struct target_waitstatus status;
2218
2219 status.kind = TARGET_WAITKIND_STOPPED;
2220 status.value.sig = GDB_SIGNAL_0;
2221
2222 return status;
2223 }
2224
2225 /* Return a target_waitstatus indicating a spurious stop. */
2226
2227 static struct target_waitstatus
2228 btrace_step_spurious (void)
2229 {
2230 struct target_waitstatus status;
2231
2232 status.kind = TARGET_WAITKIND_SPURIOUS;
2233
2234 return status;
2235 }
2236
2237 /* Return a target_waitstatus indicating that the thread was not resumed. */
2238
2239 static struct target_waitstatus
2240 btrace_step_no_resumed (void)
2241 {
2242 struct target_waitstatus status;
2243
2244 status.kind = TARGET_WAITKIND_NO_RESUMED;
2245
2246 return status;
2247 }
2248
2249 /* Return a target_waitstatus indicating that we should wait again. */
2250
2251 static struct target_waitstatus
2252 btrace_step_again (void)
2253 {
2254 struct target_waitstatus status;
2255
2256 status.kind = TARGET_WAITKIND_IGNORE;
2257
2258 return status;
2259 }
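/* Annotation (not part of the original file): the helpers above map onto
   target wait kinds as follows.

     btrace_step_no_history ()          TARGET_WAITKIND_NO_HISTORY
     btrace_step_stopped ()             TARGET_WAITKIND_STOPPED, GDB_SIGNAL_TRAP
     btrace_step_stopped_on_request ()  TARGET_WAITKIND_STOPPED, GDB_SIGNAL_0
     btrace_step_spurious ()            TARGET_WAITKIND_SPURIOUS
     btrace_step_no_resumed ()          TARGET_WAITKIND_NO_RESUMED
     btrace_step_again ()               TARGET_WAITKIND_IGNORE

   record_btrace_step_thread below returns these to record_btrace_wait, which
   treats IGNORE as "step this thread again" and NO_HISTORY as "park this
   thread until nothing else is left to report".  */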
2260
2261 /* Clear the record histories. */
2262
2263 static void
2264 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2265 {
2266 xfree (btinfo->insn_history);
2267 xfree (btinfo->call_history);
2268
2269 btinfo->insn_history = NULL;
2270 btinfo->call_history = NULL;
2271 }
2272
2273 /* Check whether TP's current replay position is at a breakpoint. */
2274
2275 static int
2276 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2277 {
2278 struct btrace_insn_iterator *replay;
2279 struct btrace_thread_info *btinfo;
2280 const struct btrace_insn *insn;
2281 struct inferior *inf;
2282
2283 btinfo = &tp->btrace;
2284 replay = btinfo->replay;
2285
2286 if (replay == NULL)
2287 return 0;
2288
2289 insn = btrace_insn_get (replay);
2290 if (insn == NULL)
2291 return 0;
2292
2293 inf = find_inferior_ptid (tp->ptid);
2294 if (inf == NULL)
2295 return 0;
2296
2297 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2298 &btinfo->stop_reason);
2299 }
2300
2301 /* Step one instruction in forward direction. */
2302
2303 static struct target_waitstatus
2304 record_btrace_single_step_forward (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay, end, start;
2307 struct btrace_thread_info *btinfo;
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 /* We're done if we're not replaying. */
2313 if (replay == NULL)
2314 return btrace_step_no_history ();
2315
2316 /* Check if we're stepping a breakpoint. */
2317 if (record_btrace_replay_at_breakpoint (tp))
2318 return btrace_step_stopped ();
2319
2320 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2321 jump back to the instruction at which we started. */
2322 start = *replay;
2323 do
2324 {
2325 unsigned int steps;
2326
2327 /* We will bail out here if we continue stepping after reaching the end
2328 of the execution history. */
2329 steps = btrace_insn_next (replay, 1);
2330 if (steps == 0)
2331 {
2332 *replay = start;
2333 return btrace_step_no_history ();
2334 }
2335 }
2336 while (btrace_insn_get (replay) == NULL);
2337
2338 /* Determine the end of the instruction trace. */
2339 btrace_insn_end (&end, btinfo);
2340
2341 /* The execution trace contains (and ends with) the current instruction.
2342 This instruction has not been executed yet, so the trace really ends
2343 one instruction earlier. */
2344 if (btrace_insn_cmp (replay, &end) == 0)
2345 return btrace_step_no_history ();
2346
2347 return btrace_step_spurious ();
2348 }
2349
2350 /* Step one instruction in backward direction. */
2351
2352 static struct target_waitstatus
2353 record_btrace_single_step_backward (struct thread_info *tp)
2354 {
2355 struct btrace_insn_iterator *replay, start;
2356 struct btrace_thread_info *btinfo;
2357
2358 btinfo = &tp->btrace;
2359 replay = btinfo->replay;
2360
2361 /* Start replaying if we're not already doing so. */
2362 if (replay == NULL)
2363 replay = record_btrace_start_replaying (tp);
2364
2365 /* If we can't step any further, we reached the end of the history.
2366 Skip gaps during replay. If we end up at a gap (at the beginning of
2367 the trace), jump back to the instruction at which we started. */
2368 start = *replay;
2369 do
2370 {
2371 unsigned int steps;
2372
2373 steps = btrace_insn_prev (replay, 1);
2374 if (steps == 0)
2375 {
2376 *replay = start;
2377 return btrace_step_no_history ();
2378 }
2379 }
2380 while (btrace_insn_get (replay) == NULL);
2381
2382 /* Check if we're stepping a breakpoint.
2383
2384 For reverse-stepping, this check is after the step. There is logic in
2385 infrun.c that handles reverse-stepping separately. See, for example,
2386 proceed and adjust_pc_after_break.
2387
2388 This code assumes that for reverse-stepping, PC points to the last
2389 de-executed instruction, whereas for forward-stepping PC points to the
2390 next to-be-executed instruction. */
2391 if (record_btrace_replay_at_breakpoint (tp))
2392 return btrace_step_stopped ();
2393
2394 return btrace_step_spurious ();
2395 }
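/* Annotation (not part of the original file): both single-step functions
   above share the same gap-skipping idiom.  A minimal sketch, with STEP
   standing for btrace_insn_next or btrace_insn_prev respectively:

     start = *replay;
     do
       {
         if (STEP (replay, 1) == 0)
           {
             *replay = start;            // out of history; restore position
             return btrace_step_no_history ();
           }
       }
     while (btrace_insn_get (replay) == NULL);  // NULL marks a trace gap
   */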
2396
2397 /* Step a single thread. */
2398
2399 static struct target_waitstatus
2400 record_btrace_step_thread (struct thread_info *tp)
2401 {
2402 struct btrace_thread_info *btinfo;
2403 struct target_waitstatus status;
2404 enum btrace_thread_flag flags;
2405
2406 btinfo = &tp->btrace;
2407
2408 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2409 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2410
2411 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2412 target_pid_to_str (tp->ptid), flags,
2413 btrace_thread_flag_to_str (flags));
2414
2415 /* We can't step without an execution history. */
2416 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2417 return btrace_step_no_history ();
2418
2419 switch (flags)
2420 {
2421 default:
2422 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2423
2424 case BTHR_STOP:
2425 return btrace_step_stopped_on_request ();
2426
2427 case BTHR_STEP:
2428 status = record_btrace_single_step_forward (tp);
2429 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2430 break;
2431
2432 return btrace_step_stopped ();
2433
2434 case BTHR_RSTEP:
2435 status = record_btrace_single_step_backward (tp);
2436 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2437 break;
2438
2439 return btrace_step_stopped ();
2440
2441 case BTHR_CONT:
2442 status = record_btrace_single_step_forward (tp);
2443 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2444 break;
2445
2446 btinfo->flags |= flags;
2447 return btrace_step_again ();
2448
2449 case BTHR_RCONT:
2450 status = record_btrace_single_step_backward (tp);
2451 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2452 break;
2453
2454 btinfo->flags |= flags;
2455 return btrace_step_again ();
2456 }
2457
2458 /* We keep threads moving at the end of their execution history. The to_wait
2459 method will stop the thread for which the event is reported. */
2460 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2461 btinfo->flags |= flags;
2462
2463 return status;
2464 }
2465
2466 /* A vector of threads. */
2467
2468 typedef struct thread_info * tp_t;
2469 DEF_VEC_P (tp_t);
2470
2471 /* Announce further events if necessary. */
2472
2473 static void
2474 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2475 const VEC (tp_t) *no_history)
2476 {
2477 int more_moving, more_no_history;
2478
2479 more_moving = !VEC_empty (tp_t, moving);
2480 more_no_history = !VEC_empty (tp_t, no_history);
2481
2482 if (!more_moving && !more_no_history)
2483 return;
2484
2485 if (more_moving)
2486 DEBUG ("movers pending");
2487
2488 if (more_no_history)
2489 DEBUG ("no-history pending");
2490
2491 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2492 }
2493
2494 /* The to_wait method of target record-btrace. */
2495
2496 static ptid_t
2497 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2498 struct target_waitstatus *status, int options)
2499 {
2500 VEC (tp_t) *moving, *no_history;
2501 struct thread_info *tp, *eventing;
2502 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2503
2504 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2505
2506 /* As long as we're not replaying, just forward the request. */
2507 if ((execution_direction != EXEC_REVERSE)
2508 && !record_btrace_is_replaying (ops, minus_one_ptid))
2509 {
2510 ops = ops->beneath;
2511 return ops->to_wait (ops, ptid, status, options);
2512 }
2513
2514 moving = NULL;
2515 no_history = NULL;
2516
2517 make_cleanup (VEC_cleanup (tp_t), &moving);
2518 make_cleanup (VEC_cleanup (tp_t), &no_history);
2519
2520 /* Keep a work list of moving threads. */
2521 ALL_NON_EXITED_THREADS (tp)
2522 if (ptid_match (tp->ptid, ptid)
2523 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2524 VEC_safe_push (tp_t, moving, tp);
2525
2526 if (VEC_empty (tp_t, moving))
2527 {
2528 *status = btrace_step_no_resumed ();
2529
2530 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2531 target_waitstatus_to_string (status));
2532
2533 do_cleanups (cleanups);
2534 return null_ptid;
2535 }
2536
2537 /* Step moving threads one by one, one step each, until either one thread
2538 reports an event or we run out of threads to step.
2539
2540 When stepping more than one thread, chances are that some threads reach
2541 the end of their execution history earlier than others. If we reported
2542 this immediately, all-stop on top of non-stop would stop all threads and
2543 resume the same threads next time. And we would report the same thread
2544 having reached the end of its execution history again.
2545
2546 In the worst case, this would starve the other threads. But even if other
2547 threads would be allowed to make progress, this would result in far too
2548 many intermediate stops.
2549
2550 We therefore delay the reporting of "no execution history" until we have
2551 nothing else to report. By this time, all threads should have moved to
2552 either the beginning or the end of their execution history. There will
2553 be a single user-visible stop. */
2554 eventing = NULL;
2555 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2556 {
2557 unsigned int ix;
2558
2559 ix = 0;
2560 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2561 {
2562 *status = record_btrace_step_thread (tp);
2563
2564 switch (status->kind)
2565 {
2566 case TARGET_WAITKIND_IGNORE:
2567 ix++;
2568 break;
2569
2570 case TARGET_WAITKIND_NO_HISTORY:
2571 VEC_safe_push (tp_t, no_history,
2572 VEC_ordered_remove (tp_t, moving, ix));
2573 break;
2574
2575 default:
2576 eventing = VEC_unordered_remove (tp_t, moving, ix);
2577 break;
2578 }
2579 }
2580 }
2581
2582 if (eventing == NULL)
2583 {
2584 /* We started with at least one moving thread. This thread must have
2585 either stopped or reached the end of its execution history.
2586
2587 In the former case, EVENTING must not be NULL.
2588 In the latter case, NO_HISTORY must not be empty. */
2589 gdb_assert (!VEC_empty (tp_t, no_history));
2590
2591 /* We kept threads moving at the end of their execution history. Stop
2592 EVENTING now that we are going to report its stop. */
2593 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2594 eventing->btrace.flags &= ~BTHR_MOVE;
2595
2596 *status = btrace_step_no_history ();
2597 }
2598
2599 gdb_assert (eventing != NULL);
2600
2601 /* We kept threads replaying at the end of their execution history. Stop
2602 replaying EVENTING now that we are going to report its stop. */
2603 record_btrace_stop_replaying_at_end (eventing);
2604
2605 /* Stop all other threads. */
2606 if (!target_is_non_stop_p ())
2607 ALL_NON_EXITED_THREADS (tp)
2608 record_btrace_cancel_resume (tp);
2609
2610 /* In async mode, we need to announce further events. */
2611 if (target_is_async_p ())
2612 record_btrace_maybe_mark_async_event (moving, no_history);
2613
2614 /* Start record histories anew from the current position. */
2615 record_btrace_clear_histories (&eventing->btrace);
2616
2617 /* We moved the replay position but did not update registers. */
2618 registers_changed_ptid (eventing->ptid);
2619
2620 DEBUG ("wait ended by thread %s (%s): %s",
2621 print_thread_id (eventing),
2622 target_pid_to_str (eventing->ptid),
2623 target_waitstatus_to_string (status));
2624
2625 do_cleanups (cleanups);
2626 return eventing->ptid;
2627 }
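/* Annotation (not part of the original file): the loop above is a simple
   round-robin scheduler over MOVING.  Each pass steps every thread once;
   TARGET_WAITKIND_IGNORE keeps a thread in MOVING, TARGET_WAITKIND_NO_HISTORY
   moves it to NO_HISTORY, and anything else selects the thread as EVENTING
   and ends the loop.  Only when MOVING drains completely is "no execution
   history" reported, via the first entry of NO_HISTORY.  */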
2628
2629 /* The to_stop method of target record-btrace. */
2630
2631 static void
2632 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2633 {
2634 DEBUG ("stop %s", target_pid_to_str (ptid));
2635
2636 /* As long as we're not replaying, just forward the request. */
2637 if ((execution_direction != EXEC_REVERSE)
2638 && !record_btrace_is_replaying (ops, minus_one_ptid))
2639 {
2640 ops = ops->beneath;
2641 ops->to_stop (ops, ptid);
2642 }
2643 else
2644 {
2645 struct thread_info *tp;
2646
2647 ALL_NON_EXITED_THREADS (tp)
2648 if (ptid_match (tp->ptid, ptid))
2649 {
2650 tp->btrace.flags &= ~BTHR_MOVE;
2651 tp->btrace.flags |= BTHR_STOP;
2652 }
2653 }
2654 }
2655
2656 /* The to_can_execute_reverse method of target record-btrace. */
2657
2658 static int
2659 record_btrace_can_execute_reverse (struct target_ops *self)
2660 {
2661 return 1;
2662 }
2663
2664 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2665
2666 static int
2667 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2668 {
2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
2670 {
2671 struct thread_info *tp = inferior_thread ();
2672
2673 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2674 }
2675
2676 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2677 }
2678
2679 /* The to_supports_stopped_by_sw_breakpoint method of target
2680 record-btrace. */
2681
2682 static int
2683 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2684 {
2685 if (record_btrace_is_replaying (ops, minus_one_ptid))
2686 return 1;
2687
2688 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2689 }
2690
2691 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2692
2693 static int
2694 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2695 {
2696 if (record_btrace_is_replaying (ops, minus_one_ptid))
2697 {
2698 struct thread_info *tp = inferior_thread ();
2699
2700 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2701 }
2702
2703 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2704 }
2705
2706 /* The to_supports_stopped_by_hw_breakpoint method of target
2707 record-btrace. */
2708
2709 static int
2710 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2711 {
2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
2713 return 1;
2714
2715 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2716 }
2717
2718 /* The to_update_thread_list method of target record-btrace. */
2719
2720 static void
2721 record_btrace_update_thread_list (struct target_ops *ops)
2722 {
2723 /* We don't add or remove threads during replay. */
2724 if (record_btrace_is_replaying (ops, minus_one_ptid))
2725 return;
2726
2727 /* Forward the request. */
2728 ops = ops->beneath;
2729 ops->to_update_thread_list (ops);
2730 }
2731
2732 /* The to_thread_alive method of target record-btrace. */
2733
2734 static int
2735 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2736 {
2737 /* We don't add or remove threads during replay. */
2738 if (record_btrace_is_replaying (ops, minus_one_ptid))
2739 return find_thread_ptid (ptid) != NULL;
2740
2741 /* Forward the request. */
2742 ops = ops->beneath;
2743 return ops->to_thread_alive (ops, ptid);
2744 }
2745
2746 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749 static void
2750 record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752 {
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
2757 if (it == NULL || it->function == NULL)
2758 record_btrace_stop_replaying (tp);
2759 else
2760 {
2761 if (btinfo->replay == NULL)
2762 record_btrace_start_replaying (tp);
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
2767 registers_changed_ptid (tp->ptid);
2768 }
2769
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
2772
2773 stop_pc = regcache_read_pc (get_current_regcache ());
2774 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2775 }
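/* Illustrative usage of record_btrace_set_replay (a sketch, not part of the
   original file); the goto methods below follow this pattern:

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, &tp->btrace, number))
       record_btrace_set_replay (tp, &it);  // replay from instruction NUMBER
     ...
     record_btrace_set_replay (tp, NULL);   // stop replaying
   */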
2776
2777 /* The to_goto_record_begin method of target record-btrace. */
2778
2779 static void
2780 record_btrace_goto_begin (struct target_ops *self)
2781 {
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator begin;
2784
2785 tp = require_btrace_thread ();
2786
2787 btrace_insn_begin (&begin, &tp->btrace);
2788
2789 /* Skip gaps at the beginning of the trace. */
2790 while (btrace_insn_get (&begin) == NULL)
2791 {
2792 unsigned int steps;
2793
2794 steps = btrace_insn_next (&begin, 1);
2795 if (steps == 0)
2796 error (_("No trace."));
2797 }
2798
2799 record_btrace_set_replay (tp, &begin);
2800 }
2801
2802 /* The to_goto_record_end method of target record-btrace. */
2803
2804 static void
2805 record_btrace_goto_end (struct target_ops *ops)
2806 {
2807 struct thread_info *tp;
2808
2809 tp = require_btrace_thread ();
2810
2811 record_btrace_set_replay (tp, NULL);
2812 }
2813
2814 /* The to_goto_record method of target record-btrace. */
2815
2816 static void
2817 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2818 {
2819 struct thread_info *tp;
2820 struct btrace_insn_iterator it;
2821 unsigned int number;
2822 int found;
2823
2824 number = insn;
2825
2826 /* Check for truncation in the ULONGEST to unsigned int narrowing above. */
2827 if (number != insn)
2828 error (_("Instruction number out of range."));
2829
2830 tp = require_btrace_thread ();
2831
2832 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2833 if (found == 0)
2834 error (_("No such instruction."));
2835
2836 record_btrace_set_replay (tp, &it);
2837 }
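/* Annotation (not part of the original file): from the CLI, the three goto
   methods above back the "record goto" command, e.g.

     (gdb) record goto begin     # record_btrace_goto_begin
     (gdb) record goto end       # record_btrace_goto_end
     (gdb) record goto 1234      # record_btrace_goto, instruction 1234
   */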
2838
2839 /* The to_record_stop_replaying method of target record-btrace. */
2840
2841 static void
2842 record_btrace_stop_replaying_all (struct target_ops *self)
2843 {
2844 struct thread_info *tp;
2845
2846 ALL_NON_EXITED_THREADS (tp)
2847 record_btrace_stop_replaying (tp);
2848 }
2849
2850 /* The to_execution_direction target method. */
2851
2852 static enum exec_direction_kind
2853 record_btrace_execution_direction (struct target_ops *self)
2854 {
2855 return record_btrace_resume_exec_dir;
2856 }
2857
2858 /* The to_prepare_to_generate_core target method. */
2859
2860 static void
2861 record_btrace_prepare_to_generate_core (struct target_ops *self)
2862 {
2863 record_btrace_generating_corefile = 1;
2864 }
2865
2866 /* The to_done_generating_core target method. */
2867
2868 static void
2869 record_btrace_done_generating_core (struct target_ops *self)
2870 {
2871 record_btrace_generating_corefile = 0;
2872 }
2873
2874 /* Initialize the record-btrace target ops. */
2875
2876 static void
2877 init_record_btrace_ops (void)
2878 {
2879 struct target_ops *ops;
2880
2881 ops = &record_btrace_ops;
2882 ops->to_shortname = "record-btrace";
2883 ops->to_longname = "Branch tracing target";
2884 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2885 ops->to_open = record_btrace_open;
2886 ops->to_close = record_btrace_close;
2887 ops->to_async = record_btrace_async;
2888 ops->to_detach = record_detach;
2889 ops->to_disconnect = record_btrace_disconnect;
2890 ops->to_mourn_inferior = record_mourn_inferior;
2891 ops->to_kill = record_kill;
2892 ops->to_stop_recording = record_btrace_stop_recording;
2893 ops->to_info_record = record_btrace_info;
2894 ops->to_insn_history = record_btrace_insn_history;
2895 ops->to_insn_history_from = record_btrace_insn_history_from;
2896 ops->to_insn_history_range = record_btrace_insn_history_range;
2897 ops->to_call_history = record_btrace_call_history;
2898 ops->to_call_history_from = record_btrace_call_history_from;
2899 ops->to_call_history_range = record_btrace_call_history_range;
2900 ops->to_record_is_replaying = record_btrace_is_replaying;
2901 ops->to_record_will_replay = record_btrace_will_replay;
2902 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2903 ops->to_xfer_partial = record_btrace_xfer_partial;
2904 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2905 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2906 ops->to_fetch_registers = record_btrace_fetch_registers;
2907 ops->to_store_registers = record_btrace_store_registers;
2908 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2909 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2910 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2911 ops->to_resume = record_btrace_resume;
2912 ops->to_commit_resume = record_btrace_commit_resume;
2913 ops->to_wait = record_btrace_wait;
2914 ops->to_stop = record_btrace_stop;
2915 ops->to_update_thread_list = record_btrace_update_thread_list;
2916 ops->to_thread_alive = record_btrace_thread_alive;
2917 ops->to_goto_record_begin = record_btrace_goto_begin;
2918 ops->to_goto_record_end = record_btrace_goto_end;
2919 ops->to_goto_record = record_btrace_goto;
2920 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2921 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2922 ops->to_supports_stopped_by_sw_breakpoint
2923 = record_btrace_supports_stopped_by_sw_breakpoint;
2924 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2925 ops->to_supports_stopped_by_hw_breakpoint
2926 = record_btrace_supports_stopped_by_hw_breakpoint;
2927 ops->to_execution_direction = record_btrace_execution_direction;
2928 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2929 ops->to_done_generating_core = record_btrace_done_generating_core;
2930 ops->to_stratum = record_stratum;
2931 ops->to_magic = OPS_MAGIC;
2932 }
2933
2934 /* Start recording in BTS format. */
2935
2936 static void
2937 cmd_record_btrace_bts_start (char *args, int from_tty)
2938 {
2939 if (args != NULL && *args != 0)
2940 error (_("Invalid argument."));
2941
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943
2944 TRY
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 CATCH (exception, RETURN_MASK_ALL)
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (exception);
2952 }
2953 END_CATCH
2954 }
2955
2956 /* Start recording in Intel Processor Trace format. */
2957
2958 static void
2959 cmd_record_btrace_pt_start (char *args, int from_tty)
2960 {
2961 if (args != NULL && *args != 0)
2962 error (_("Invalid argument."));
2963
2964 record_btrace_conf.format = BTRACE_FORMAT_PT;
2965
2966 TRY
2967 {
2968 execute_command ("target record-btrace", from_tty);
2969 }
2970 CATCH (exception, RETURN_MASK_ALL)
2971 {
2972 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2973 throw_exception (exception);
2974 }
2975 END_CATCH
2976 }
2977
2978 /* Start recording, trying Intel PT first and falling back to BTS. */
2979
2980 static void
2981 cmd_record_btrace_start (char *args, int from_tty)
2982 {
2983 if (args != NULL && *args != 0)
2984 error (_("Invalid argument."));
2985
2986 record_btrace_conf.format = BTRACE_FORMAT_PT;
2987
2988 TRY
2989 {
2990 execute_command ("target record-btrace", from_tty);
2991 }
2992 CATCH (exception, RETURN_MASK_ALL)
2993 {
2994 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2995
2996 TRY
2997 {
2998 execute_command ("target record-btrace", from_tty);
2999 }
3000 CATCH (exception, RETURN_MASK_ALL)
3001 {
3002 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3003 throw_exception (exception);
3004 }
3005 END_CATCH
3006 }
3007 END_CATCH
3008 }
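/* Annotation (not part of the original file): the three start commands above
   select the trace format as follows.

     (gdb) record btrace         # try Intel PT first, fall back to BTS
     (gdb) record btrace pt      # require the Intel Processor Trace format
     (gdb) record btrace bts     # require the Branch Trace Store format
   */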
3009
3010 /* The "set record btrace" command. */
3011
3012 static void
3013 cmd_set_record_btrace (char *args, int from_tty)
3014 {
3015 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3016 }
3017
3018 /* The "show record btrace" command. */
3019
3020 static void
3021 cmd_show_record_btrace (char *args, int from_tty)
3022 {
3023 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3024 }
3025
3026 /* The "show record btrace replay-memory-access" command. */
3027
3028 static void
3029 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c, const char *value)
3031 {
3032 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3033 replay_memory_access);
3034 }
3035
3036 /* The "set record btrace bts" command. */
3037
3038 static void
3039 cmd_set_record_btrace_bts (char *args, int from_tty)
3040 {
3041 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3042 "by an appropriate subcommand.\n"));
3043 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3044 all_commands, gdb_stdout);
3045 }
3046
3047 /* The "show record btrace bts" command. */
3048
3049 static void
3050 cmd_show_record_btrace_bts (char *args, int from_tty)
3051 {
3052 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3053 }
3054
3055 /* The "set record btrace pt" command. */
3056
3057 static void
3058 cmd_set_record_btrace_pt (char *args, int from_tty)
3059 {
3060 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3061 "by an appropriate subcommand.\n"));
3062 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3063 all_commands, gdb_stdout);
3064 }
3065
3066 /* The "show record btrace pt" command. */
3067
3068 static void
3069 cmd_show_record_btrace_pt (char *args, int from_tty)
3070 {
3071 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3072 }
3073
3074 /* The "record bts buffer-size" show value function. */
3075
3076 static void
3077 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3078 struct cmd_list_element *c,
3079 const char *value)
3080 {
3081 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3082 value);
3083 }
3084
3085 /* The "record pt buffer-size" show value function. */
3086
3087 static void
3088 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3089 struct cmd_list_element *c,
3090 const char *value)
3091 {
3092 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3093 value);
3094 }
3095
3096 void _initialize_record_btrace (void);
3097
3098 /* Initialize btrace commands. */
3099
3100 void
3101 _initialize_record_btrace (void)
3102 {
3103 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3104 _("Start branch trace recording."), &record_btrace_cmdlist,
3105 "record btrace ", 0, &record_cmdlist);
3106 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3107
3108 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3109 _("\
3110 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3111 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3112 This format may not be available on all processors."),
3113 &record_btrace_cmdlist);
3114 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3115
3116 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3117 _("\
3118 Start branch trace recording in Intel Processor Trace format.\n\n\
3119 This format may not be available on all processors."),
3120 &record_btrace_cmdlist);
3121 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3122
3123 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3124 _("Set record options"), &set_record_btrace_cmdlist,
3125 "set record btrace ", 0, &set_record_cmdlist);
3126
3127 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3128 _("Show record options"), &show_record_btrace_cmdlist,
3129 "show record btrace ", 0, &show_record_cmdlist);
3130
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3132 replay_memory_access_types, &replay_memory_access, _("\
3133 Set what memory accesses are allowed during replay."), _("\
3134 Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136 The btrace record target does not trace data.\n\
3137 The memory therefore corresponds to the live target and not \
3138 to the current replay position.\n\n\
3139 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140 When READ-WRITE, allow accesses to read-only and read-write memory during \
3141 replay."),
3142 NULL, cmd_show_replay_memory_access,
3143 &set_record_btrace_cmdlist,
3144 &show_record_btrace_cmdlist);
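/* Annotation (not part of the original file): an example session for the
   setting registered above.

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.
   */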
3145
3146 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3147 _("Set record btrace bts options"),
3148 &set_record_btrace_bts_cmdlist,
3149 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3150
3151 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3152 _("Show record btrace bts options"),
3153 &show_record_btrace_bts_cmdlist,
3154 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3155
3156 add_setshow_uinteger_cmd ("buffer-size", no_class,
3157 &record_btrace_conf.bts.size,
3158 _("Set the record/replay bts buffer size."),
3159 _("Show the record/replay bts buffer size."), _("\
3160 When starting recording, request a trace buffer of this size. \
3161 The actual buffer size may differ from the requested size. \
3162 Use \"info record\" to see the actual buffer size.\n\n\
3163 Bigger buffers allow longer recording but also take more time to process \
3164 the recorded execution trace.\n\n\
3165 The trace buffer size may not be changed while recording."), NULL,
3166 show_record_bts_buffer_size_value,
3167 &set_record_btrace_bts_cmdlist,
3168 &show_record_btrace_bts_cmdlist);
3169
3170 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3171 _("Set record btrace pt options"),
3172 &set_record_btrace_pt_cmdlist,
3173 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3174
3175 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3176 _("Show record btrace pt options"),
3177 &show_record_btrace_pt_cmdlist,
3178 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3179
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.pt.size,
3182 _("Set the record/replay pt buffer size."),
3183 _("Show the record/replay pt buffer size."), _("\
3184 Bigger buffers allow longer recording but also take more time to process \
3185 the recorded execution trace.\n\
3186 The actual buffer size may differ from the requested size. Use \"info record\" \
3187 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3188 &set_record_btrace_pt_cmdlist,
3189 &show_record_btrace_pt_cmdlist);
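/* Annotation (not part of the original file): example usage of the two
   buffer-size settings.  Sizes are given in bytes, an assumption consistent
   with the 64 KB and 16 KB defaults set at the end of this function.

     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace pt buffer-size
     The record/replay pt buffer size is 16384.
   */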
3190
3191 init_record_btrace_ops ();
3192 add_target (&record_btrace_ops);
3193
3194 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3195 xcalloc, xfree);
3196
3197 record_btrace_conf.bts.size = 64 * 1024;
3198 record_btrace_conf.pt.size = 16 * 1024;
3199 }