Reuse buffers across gdb_pretty_print_insn calls
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
/* The target_ops of record-btrace.  Populated and registered at
   initialization time.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   Non-NULL only while automatic tracing of new threads is active.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Defaults to
   read-only.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
/* Enable branch tracing for one thread.  Warn on errors.

   Used as the new-thread observer callback: a tracing failure for one new
   thread must not abort the operation that created it, so errors are
   downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
/* Enable automatic tracing of new threads.

   Installs record_btrace_enable_warn as a new-thread observer and remembers
   the observer handle so record_btrace_auto_disable can detach it.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
/* The record-btrace async event handler function.

   Invoked from the event loop once the handler installed by
   record_btrace_push_target has been marked; forwards to the generic
   inferior event handler.  DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
205
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  /* Attach the new-thread observer before pushing the target so that
     threads created from here on are traced automatically.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Announce the new record target (e.g. to MI consumers).  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}
225
/* The to_open method of target record-btrace.

   Enables branch tracing for the threads selected by ARGS (all non-exited
   threads when ARGS is empty) and pushes the record-btrace target.  If
   enabling throws for some thread, tracing is rolled back for the threads
   enabled so far via the cleanup chain.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Anchor for the per-thread disable cleanups registered below.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	/* If a later btrace_enable throws, disable this thread again.  */
	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  /* Success - keep tracing enabled on all selected threads.  */
  discard_cleanups (disable_chain);
}
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
/* The to_disconnect method of target record-btrace.

   Recording stays active on the target side; only the GDB-side target is
   removed before the disconnect request is forwarded.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  /* Read the target beneath first: unpush_target removes SELF from the
     target stack.  */
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}
287
/* The to_close method of target record-btrace.

   Final cleanup when the target is removed: drop the async event handler,
   detach the new-thread observer, and tear down any per-thread branch
   trace state that is still around.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GB, 1 MB or 1 kB (checked in that
   order), scale *SIZE down accordingly and return the matching suffix;
   otherwise leave *SIZE untouched and return "".  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const unsigned int shifts[] = { 30, 20, 10 };
  static const char *const suffixes[] = { "GB", "MB", "kB" };
  const unsigned int value = *size;
  int i;

  for (i = 0; i < 3; ++i)
    {
      const unsigned int mask = (1u << shifts[i]) - 1u;

      if ((value & mask) == 0)
	{
	  *size = value >> shifts[i];
	  return suffixes[i];
	}
    }

  return "";
}
348
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404 }
405
/* The to_info_record method of target record-btrace.

   Prints the trace configuration and summary statistics (number of
   recorded instructions, functions, and trace gaps) for the current
   thread, plus the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call equals the call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace. */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* An insn number of zero at the end means the trace ends in a
	     gap; walk backwards to the last real instruction.  */
	  /* Skip gaps at the end. */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
480
/* Print a decode error.

   Renders ERRCODE for trace format FORMAT as "[decode error (N): msg]",
   or just "[msg]" for conditions that are not real errors (e.g. a decode
   cancelled by the user).  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come from libipt; let libipt explain them.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  uiout->text (_("["));
  if (is_error)
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
552
/* Print an unsigned int.

   Convenience wrapper: ui_out has no field method taking an unsigned
   value, so format it explicitly.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}
560
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range = { symtab, begin, end };

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (btrace_line_range_is_empty (range))
    {
      /* First entry: the range covers exactly LINE.  */
      range.begin = line;
      range.end = line + 1;
      return range;
    }

  if (line < range.begin)
    range.begin = line;
  /* NOTE(review): lines above the range set END to LINE rather than
     LINE + 1 although END is documented as exclusive -- looks like an
     off-by-one; preserved as-is, confirm against upstream.  */
  if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  if (lhs.symtab != rhs.symtab)
    return 0;

  return lhs.begin <= rhs.begin && rhs.end <= lhs.end;
}
626
/* Find the line range associated with PC.

   Collects the source lines of all line table entries whose address
   matches PC exactly.  Returns an empty range (possibly with a NULL
   symtab) when PC has no usable line information.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the last line table entry is deliberately skipped
     (i < nlines - 1); presumably it is the end-of-sequence marker --
     confirm.  Entries with line 0 are also skipped.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
660
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  /* Only print the file name when the caller asked for it.  */
  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple/list of the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the instruction list for this line; it is closed together
	 with the tuple via *UI_ITEM_CHAIN.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
694
/* Disassemble a section of the recorded instruction trace.

   Prints instructions [BEGIN; END) from BTINFO to UIOUT, interleaving
   source lines when FLAGS requests it and reporting trace gaps as decode
   errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already printed for a previous instruction.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}
783
/* The to_insn_history method of target record-btrace.

   Prints up to abs(SIZE) instructions, continuing from the previously
   printed history if there is one, otherwise starting at the replay
   position or at the tail of the trace.  Negative SIZE means going
   backwards.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed history window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window so a subsequent command continues from here.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
869
/* The to_insn_history_range method of target record-btrace.

   Prints the instructions numbered [FROM; TO], both inclusive.  A TO
   beyond the end of the trace is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: instruction numbers are unsigned int, the
     user-supplied range is ULONGEST; a truncating conversion means the
     range was out of representable bounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
922
923 /* The to_insn_history_from method of target record-btrace. */
924
925 static void
926 record_btrace_insn_history_from (struct target_ops *self,
927 ULONGEST from, int size, int flags)
928 {
929 ULONGEST begin, end, context;
930
931 context = abs (size);
932 if (context == 0)
933 error (_("Bad record instruction-history-size."));
934
935 if (size < 0)
936 {
937 end = from;
938
939 if (from < context)
940 begin = 0;
941 else
942 begin = from - context + 1;
943 }
944 else
945 {
946 begin = from;
947 end = from + context - 1;
948
949 /* Check for wrap-around. */
950 if (end < begin)
951 end = ULONGEST_MAX;
952 }
953
954 record_btrace_insn_history_range (self, begin, end, flags);
955 }
956
/* Print the instruction number range for a function call history line.

   Prints "begin,end", both inclusive.  BFUN must contain at least one
   instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}
975
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   When no line can be determined, *PEND < *PBEGIN on return (the INT_MAX /
   INT_MIN sentinels), which callers use as the "no line info" signal.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macro expansion) and
	 entries without line information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
1016
/* Print the source line information for a function call history line.

   Prints "file", "file:line" or "file:min,max" depending on how much line
   information is available for BFUN.  Prints nothing without a symbol.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no line information was found.  */
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}
1046
1047 /* Get the name of a branch trace function. */
1048
1049 static const char *
1050 btrace_get_bfun_name (const struct btrace_function *bfun)
1051 {
1052 struct minimal_symbol *msym;
1053 struct symbol *sym;
1054
1055 if (bfun == NULL)
1056 return "??";
1057
1058 msym = bfun->msym;
1059 sym = bfun->sym;
1060
1061 if (sym != NULL)
1062 return SYMBOL_PRINT_NAME (sym);
1063 else if (msym != NULL)
1064 return MSYMBOL_PRINT_NAME (msym);
1065 else
1066 return "??";
1067 }
1068
/* Disassemble a section of the recorded function trace.

   Prints one line per function call in [BEGIN; END) from BTINFO to UIOUT:
   index, optional call-depth indentation, function name, and optionally
   the instruction range and source line info, as selected by INT_FLAGS.
   Trace gaps are reported as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by call depth; BTINFO->level normalizes the minimum
	     depth to zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1143
/* The to_call_history method of target record-btrace.

   Prints up to abs(SIZE) function calls, continuing from the previously
   printed history if there is one, otherwise starting at the replay
   position or the tail of the trace.  Negative SIZE means going
   backwards.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history -- looks copy-pasted from record_btrace_insn_history;
     MI consumers may depend on the id, so it is left unchanged.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed history window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window so a subsequent command continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1233
1234 /* The to_call_history_range method of target record-btrace. */
1235
1236 static void
1237 record_btrace_call_history_range (struct target_ops *self,
1238 ULONGEST from, ULONGEST to,
1239 int int_flags)
1240 {
1241 struct btrace_thread_info *btinfo;
1242 struct btrace_call_history *history;
1243 struct btrace_call_iterator begin, end;
1244 struct cleanup *uiout_cleanup;
1245 struct ui_out *uiout;
1246 unsigned int low, high;
1247 int found;
1248 record_print_flags flags = (enum record_print_flag) int_flags;
1249
1250 uiout = current_uiout;
1251 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1252 "func history");
1253 low = from;
1254 high = to;
1255
1256 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1257
1258 /* Check for wrap-arounds. */
1259 if (low != from || high != to)
1260 error (_("Bad range."));
1261
1262 if (high < low)
1263 error (_("Bad range."));
1264
1265 btinfo = require_btrace ();
1266
1267 found = btrace_find_call_by_number (&begin, btinfo, low);
1268 if (found == 0)
1269 error (_("Range out of bounds."));
1270
1271 found = btrace_find_call_by_number (&end, btinfo, high);
1272 if (found == 0)
1273 {
1274 /* Silently truncate the range. */
1275 btrace_call_end (&end, btinfo);
1276 }
1277 else
1278 {
1279 /* We want both begin and end to be inclusive. */
1280 btrace_call_next (&end, 1);
1281 }
1282
1283 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1284 btrace_set_call_history (btinfo, &begin, &end);
1285
1286 do_cleanups (uiout_cleanup);
1287 }
1288
1289 /* The to_call_history_from method of target record-btrace. */
1290
1291 static void
1292 record_btrace_call_history_from (struct target_ops *self,
1293 ULONGEST from, int size,
1294 int int_flags)
1295 {
1296 ULONGEST begin, end, context;
1297 record_print_flags flags = (enum record_print_flag) int_flags;
1298
1299 context = abs (size);
1300 if (context == 0)
1301 error (_("Bad record function-call-history-size."));
1302
1303 if (size < 0)
1304 {
1305 end = from;
1306
1307 if (from < context)
1308 begin = 0;
1309 else
1310 begin = from - context + 1;
1311 }
1312 else
1313 {
1314 begin = from;
1315 end = from + context - 1;
1316
1317 /* Check for wrap-around. */
1318 if (end < begin)
1319 end = ULONGEST_MAX;
1320 }
1321
1322 record_btrace_call_history_range (self, begin, end, flags);
1323 }
1324
1325 /* The to_record_is_replaying method of target record-btrace. */
1326
static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  /* Return non-zero if any non-exited thread matching PTID is currently
     replaying its execution history.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}
1338
1339 /* The to_record_will_replay method of target record-btrace. */
1340
1341 static int
1342 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1343 {
1344 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1345 }
1346
1347 /* The to_xfer_partial method of target record-btrace. */
1348
1349 static enum target_xfer_status
1350 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1351 const char *annex, gdb_byte *readbuf,
1352 const gdb_byte *writebuf, ULONGEST offset,
1353 ULONGEST len, ULONGEST *xfered_len)
1354 {
1355 struct target_ops *t;
1356
1357 /* Filter out requests that don't make sense during replay. */
1358 if (replay_memory_access == replay_memory_access_read_only
1359 && !record_btrace_generating_corefile
1360 && record_btrace_is_replaying (ops, inferior_ptid))
1361 {
1362 switch (object)
1363 {
1364 case TARGET_OBJECT_MEMORY:
1365 {
1366 struct target_section *section;
1367
1368 /* We do not allow writing memory in general. */
1369 if (writebuf != NULL)
1370 {
1371 *xfered_len = len;
1372 return TARGET_XFER_UNAVAILABLE;
1373 }
1374
1375 /* We allow reading readonly memory. */
1376 section = target_section_by_addr (ops, offset);
1377 if (section != NULL)
1378 {
1379 /* Check if the section we found is readonly. */
1380 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1381 section->the_bfd_section)
1382 & SEC_READONLY) != 0)
1383 {
1384 /* Truncate the request to fit into this section. */
1385 len = std::min (len, section->endaddr - offset);
1386 break;
1387 }
1388 }
1389
1390 *xfered_len = len;
1391 return TARGET_XFER_UNAVAILABLE;
1392 }
1393 }
1394 }
1395
1396 /* Forward the request. */
1397 ops = ops->beneath;
1398 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1399 offset, len, xfered_len);
1400 }
1401
1402 /* The to_insert_breakpoint method of target record-btrace. */
1403
1404 static int
1405 record_btrace_insert_breakpoint (struct target_ops *ops,
1406 struct gdbarch *gdbarch,
1407 struct bp_target_info *bp_tgt)
1408 {
1409 const char *old;
1410 int ret;
1411
1412 /* Inserting breakpoints requires accessing memory. Allow it for the
1413 duration of this function. */
1414 old = replay_memory_access;
1415 replay_memory_access = replay_memory_access_read_write;
1416
1417 ret = 0;
1418 TRY
1419 {
1420 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1421 }
1422 CATCH (except, RETURN_MASK_ALL)
1423 {
1424 replay_memory_access = old;
1425 throw_exception (except);
1426 }
1427 END_CATCH
1428 replay_memory_access = old;
1429
1430 return ret;
1431 }
1432
1433 /* The to_remove_breakpoint method of target record-btrace. */
1434
1435 static int
1436 record_btrace_remove_breakpoint (struct target_ops *ops,
1437 struct gdbarch *gdbarch,
1438 struct bp_target_info *bp_tgt,
1439 enum remove_bp_reason reason)
1440 {
1441 const char *old;
1442 int ret;
1443
1444 /* Removing breakpoints requires accessing memory. Allow it for the
1445 duration of this function. */
1446 old = replay_memory_access;
1447 replay_memory_access = replay_memory_access_read_write;
1448
1449 ret = 0;
1450 TRY
1451 {
1452 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1453 reason);
1454 }
1455 CATCH (except, RETURN_MASK_ALL)
1456 {
1457 replay_memory_access = old;
1458 throw_exception (except);
1459 }
1460 END_CATCH
1461 replay_memory_access = old;
1462
1463 return ret;
1464 }
1465
1466 /* The to_fetch_registers method of target record-btrace. */
1467
1468 static void
1469 record_btrace_fetch_registers (struct target_ops *ops,
1470 struct regcache *regcache, int regno)
1471 {
1472 struct btrace_insn_iterator *replay;
1473 struct thread_info *tp;
1474
1475 tp = find_thread_ptid (inferior_ptid);
1476 gdb_assert (tp != NULL);
1477
1478 replay = tp->btrace.replay;
1479 if (replay != NULL && !record_btrace_generating_corefile)
1480 {
1481 const struct btrace_insn *insn;
1482 struct gdbarch *gdbarch;
1483 int pcreg;
1484
1485 gdbarch = get_regcache_arch (regcache);
1486 pcreg = gdbarch_pc_regnum (gdbarch);
1487 if (pcreg < 0)
1488 return;
1489
1490 /* We can only provide the PC register. */
1491 if (regno >= 0 && regno != pcreg)
1492 return;
1493
1494 insn = btrace_insn_get (replay);
1495 gdb_assert (insn != NULL);
1496
1497 regcache_raw_supply (regcache, regno, &insn->pc);
1498 }
1499 else
1500 {
1501 struct target_ops *t = ops->beneath;
1502
1503 t->to_fetch_registers (t, regcache, regno);
1504 }
1505 }
1506
1507 /* The to_store_registers method of target record-btrace. */
1508
1509 static void
1510 record_btrace_store_registers (struct target_ops *ops,
1511 struct regcache *regcache, int regno)
1512 {
1513 struct target_ops *t;
1514
1515 if (!record_btrace_generating_corefile
1516 && record_btrace_is_replaying (ops, inferior_ptid))
1517 error (_("Cannot write registers while replaying."));
1518
1519 gdb_assert (may_write_registers != 0);
1520
1521 t = ops->beneath;
1522 t->to_store_registers (t, regcache, regno);
1523 }
1524
1525 /* The to_prepare_to_store method of target record-btrace. */
1526
1527 static void
1528 record_btrace_prepare_to_store (struct target_ops *ops,
1529 struct regcache *regcache)
1530 {
1531 struct target_ops *t;
1532
1533 if (!record_btrace_generating_corefile
1534 && record_btrace_is_replaying (ops, inferior_ptid))
1535 return;
1536
1537 t = ops->beneath;
1538 t->to_prepare_to_store (t, regcache);
1539 }
1540
1541 /* The branch trace frame cache. */
1542
/* Per-frame cache entry linking a frame to the btrace function segment
   it was built from.  Allocated on the frame obstack (see bfcache_new)
   and looked up via the BFCACHE hash table.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash key (see bfcache_hash).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1554
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are hashed and compared by their frame_info pointer
   (see bfcache_hash and bfcache_eq).  */

static htab_t bfcache;
1558
1559 /* hash_f for htab_create_alloc of bfcache. */
1560
1561 static hashval_t
1562 bfcache_hash (const void *arg)
1563 {
1564 const struct btrace_frame_cache *cache
1565 = (const struct btrace_frame_cache *) arg;
1566
1567 return htab_hash_pointer (cache->frame);
1568 }
1569
1570 /* eq_f for htab_create_alloc of bfcache. */
1571
1572 static int
1573 bfcache_eq (const void *arg1, const void *arg2)
1574 {
1575 const struct btrace_frame_cache *cache1
1576 = (const struct btrace_frame_cache *) arg1;
1577 const struct btrace_frame_cache *cache2
1578 = (const struct btrace_frame_cache *) arg2;
1579
1580 return cache1->frame == cache2->frame;
1581 }
1582
1583 /* Create a new btrace frame cache. */
1584
1585 static struct btrace_frame_cache *
1586 bfcache_new (struct frame_info *frame)
1587 {
1588 struct btrace_frame_cache *cache;
1589 void **slot;
1590
1591 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1592 cache->frame = frame;
1593
1594 slot = htab_find_slot (bfcache, cache, INSERT);
1595 gdb_assert (*slot == NULL);
1596 *slot = cache;
1597
1598 return cache;
1599 }
1600
1601 /* Extract the branch trace function from a branch trace frame. */
1602
1603 static const struct btrace_function *
1604 btrace_get_frame_function (struct frame_info *frame)
1605 {
1606 const struct btrace_frame_cache *cache;
1607 const struct btrace_function *bfun;
1608 struct btrace_frame_cache pattern;
1609 void **slot;
1610
1611 pattern.frame = frame;
1612
1613 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1614 if (slot == NULL)
1615 return NULL;
1616
1617 cache = (const struct btrace_frame_cache *) *slot;
1618 return cache->bfun;
1619 }
1620
1621 /* Implement stop_reason method for record_btrace_frame_unwind. */
1622
1623 static enum unwind_stop_reason
1624 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1625 void **this_cache)
1626 {
1627 const struct btrace_frame_cache *cache;
1628 const struct btrace_function *bfun;
1629
1630 cache = (const struct btrace_frame_cache *) *this_cache;
1631 bfun = cache->bfun;
1632 gdb_assert (bfun != NULL);
1633
1634 if (bfun->up == NULL)
1635 return UNWIND_UNAVAILABLE;
1636
1637 return UNWIND_NO_REASON;
1638 }
1639
1640 /* Implement this_id method for record_btrace_frame_unwind. */
1641
1642 static void
1643 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1644 struct frame_id *this_id)
1645 {
1646 const struct btrace_frame_cache *cache;
1647 const struct btrace_function *bfun;
1648 CORE_ADDR code, special;
1649
1650 cache = (const struct btrace_frame_cache *) *this_cache;
1651
1652 bfun = cache->bfun;
1653 gdb_assert (bfun != NULL);
1654
1655 while (bfun->segment.prev != NULL)
1656 bfun = bfun->segment.prev;
1657
1658 code = get_frame_func (this_frame);
1659 special = bfun->number;
1660
1661 *this_id = frame_id_build_unavailable_stack_special (code, special);
1662
1663 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1664 btrace_get_bfun_name (cache->bfun),
1665 core_addr_to_string_nz (this_id->code_addr),
1666 core_addr_to_string_nz (this_id->special_addr));
1667 }
1668
1669 /* Implement prev_register method for record_btrace_frame_unwind. */
1670
1671 static struct value *
1672 record_btrace_frame_prev_register (struct frame_info *this_frame,
1673 void **this_cache,
1674 int regnum)
1675 {
1676 const struct btrace_frame_cache *cache;
1677 const struct btrace_function *bfun, *caller;
1678 const struct btrace_insn *insn;
1679 struct gdbarch *gdbarch;
1680 CORE_ADDR pc;
1681 int pcreg;
1682
1683 gdbarch = get_frame_arch (this_frame);
1684 pcreg = gdbarch_pc_regnum (gdbarch);
1685 if (pcreg < 0 || regnum != pcreg)
1686 throw_error (NOT_AVAILABLE_ERROR,
1687 _("Registers are not available in btrace record history"));
1688
1689 cache = (const struct btrace_frame_cache *) *this_cache;
1690 bfun = cache->bfun;
1691 gdb_assert (bfun != NULL);
1692
1693 caller = bfun->up;
1694 if (caller == NULL)
1695 throw_error (NOT_AVAILABLE_ERROR,
1696 _("No caller in btrace record history"));
1697
1698 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1699 {
1700 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1701 pc = insn->pc;
1702 }
1703 else
1704 {
1705 insn = VEC_last (btrace_insn_s, caller->insn);
1706 pc = insn->pc;
1707
1708 pc += gdb_insn_length (gdbarch, pc);
1709 }
1710
1711 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1712 btrace_get_bfun_name (bfun), bfun->level,
1713 core_addr_to_string_nz (pc));
1714
1715 return frame_unwind_got_address (this_frame, regnum, pc);
1716 }
1717
1718 /* Implement sniffer method for record_btrace_frame_unwind. */
1719
1720 static int
1721 record_btrace_frame_sniffer (const struct frame_unwind *self,
1722 struct frame_info *this_frame,
1723 void **this_cache)
1724 {
1725 const struct btrace_function *bfun;
1726 struct btrace_frame_cache *cache;
1727 struct thread_info *tp;
1728 struct frame_info *next;
1729
1730 /* THIS_FRAME does not contain a reference to its thread. */
1731 tp = find_thread_ptid (inferior_ptid);
1732 gdb_assert (tp != NULL);
1733
1734 bfun = NULL;
1735 next = get_next_frame (this_frame);
1736 if (next == NULL)
1737 {
1738 const struct btrace_insn_iterator *replay;
1739
1740 replay = tp->btrace.replay;
1741 if (replay != NULL)
1742 bfun = replay->function;
1743 }
1744 else
1745 {
1746 const struct btrace_function *callee;
1747
1748 callee = btrace_get_frame_function (next);
1749 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1750 bfun = callee->up;
1751 }
1752
1753 if (bfun == NULL)
1754 return 0;
1755
1756 DEBUG ("[frame] sniffed frame for %s on level %d",
1757 btrace_get_bfun_name (bfun), bfun->level);
1758
1759 /* This is our frame. Initialize the frame cache. */
1760 cache = bfcache_new (this_frame);
1761 cache->tp = tp;
1762 cache->bfun = bfun;
1763
1764 *this_cache = cache;
1765 return 1;
1766 }
1767
1768 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1769
1770 static int
1771 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1772 struct frame_info *this_frame,
1773 void **this_cache)
1774 {
1775 const struct btrace_function *bfun, *callee;
1776 struct btrace_frame_cache *cache;
1777 struct frame_info *next;
1778
1779 next = get_next_frame (this_frame);
1780 if (next == NULL)
1781 return 0;
1782
1783 callee = btrace_get_frame_function (next);
1784 if (callee == NULL)
1785 return 0;
1786
1787 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1788 return 0;
1789
1790 bfun = callee->up;
1791 if (bfun == NULL)
1792 return 0;
1793
1794 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1795 btrace_get_bfun_name (bfun), bfun->level);
1796
1797 /* This is our frame. Initialize the frame cache. */
1798 cache = bfcache_new (this_frame);
1799 cache->tp = find_thread_ptid (inferior_ptid);
1800 cache->bfun = bfun;
1801
1802 *this_cache = cache;
1803 return 1;
1804 }
1805
1806 static void
1807 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1808 {
1809 struct btrace_frame_cache *cache;
1810 void **slot;
1811
1812 cache = (struct btrace_frame_cache *) this_cache;
1813
1814 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1815 gdb_assert (slot != NULL);
1816
1817 htab_remove_elt (bfcache, cache);
1818 }
1819
1820 /* btrace recording does not store previous memory content, neither the stack
1821 frames content. Any unwinding would return errorneous results as the stack
1822 contents no longer matches the changed PC value restored from history.
1823 Therefore this unwinder reports any possibly unwound registers as
1824 <unavailable>. */
1825
const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1836
/* Like record_btrace_frame_unwind, but for frames reached through a
   tail call (see record_btrace_tailcall_frame_sniffer).  */
const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1847
1848 /* Implement the to_get_unwinder method. */
1849
static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  /* All record-btrace frames are unwound by the btrace unwinder.  */
  return &record_btrace_frame_unwind;
}
1855
1856 /* Implement the to_get_tailcall_unwinder method. */
1857
static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  /* Tail-call frames are unwound by the dedicated tailcall unwinder.  */
  return &record_btrace_tailcall_frame_unwind;
}
1863
1864 /* Return a human-readable string for FLAG. */
1865
static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  /* Map each single resume/stop flag to a static string for DEBUG
     output.  No default case so the compiler can warn about unhandled
     enumerators; combined or unknown flags yield "<invalid>".  */
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}
1889
1890 /* Indicate that TP should be resumed according to FLAG. */
1891
1892 static void
1893 record_btrace_resume_thread (struct thread_info *tp,
1894 enum btrace_thread_flag flag)
1895 {
1896 struct btrace_thread_info *btinfo;
1897
1898 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1899 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1900
1901 btinfo = &tp->btrace;
1902
1903 /* Fetch the latest branch trace. */
1904 btrace_fetch (tp);
1905
1906 /* A resume request overwrites a preceding resume or stop request. */
1907 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1908 btinfo->flags |= flag;
1909 }
1910
1911 /* Get the current frame for TP. */
1912
static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1959
1960 /* Start replaying a thread. */
1961
static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.
	 This must happen after BTINFO->REPLAY is set, so the btrace
	 sniffer picks up the frame.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo any partial replay setup before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2041
2042 /* Stop replaying a thread. */
2043
2044 static void
2045 record_btrace_stop_replaying (struct thread_info *tp)
2046 {
2047 struct btrace_thread_info *btinfo;
2048
2049 btinfo = &tp->btrace;
2050
2051 xfree (btinfo->replay);
2052 btinfo->replay = NULL;
2053
2054 /* Make sure we're not leaving any stale registers. */
2055 registers_changed_ptid (tp->ptid);
2056 }
2057
2058 /* Stop replaying TP if it is at the end of its execution history. */
2059
2060 static void
2061 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2062 {
2063 struct btrace_insn_iterator *replay, end;
2064 struct btrace_thread_info *btinfo;
2065
2066 btinfo = &tp->btrace;
2067 replay = btinfo->replay;
2068
2069 if (replay == NULL)
2070 return;
2071
2072 btrace_insn_end (&end, btinfo);
2073
2074 if (btrace_insn_cmp (replay, &end) == 0)
2075 record_btrace_stop_replaying (tp);
2076 }
2077
2078 /* The to_resume method of target record-btrace. */
2079
static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.

     NOTE(review): SIGNAL is only forwarded here; in the replay paths below
     it is not delivered to the thread.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2153
2154 /* The to_commit_resume method of target record-btrace. */
2155
2156 static void
2157 record_btrace_commit_resume (struct target_ops *ops)
2158 {
2159 if ((execution_direction != EXEC_REVERSE)
2160 && !record_btrace_is_replaying (ops, minus_one_ptid))
2161 ops->beneath->to_commit_resume (ops->beneath);
2162 }
2163
2164 /* Cancel resuming TP. */
2165
2166 static void
2167 record_btrace_cancel_resume (struct thread_info *tp)
2168 {
2169 enum btrace_thread_flag flags;
2170
2171 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2172 if (flags == 0)
2173 return;
2174
2175 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2176 print_thread_id (tp),
2177 target_pid_to_str (tp->ptid), flags,
2178 btrace_thread_flag_to_str (flags));
2179
2180 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2181 record_btrace_stop_replaying_at_end (tp);
2182 }
2183
2184 /* Return a target_waitstatus indicating that we ran out of history. */
2185
2186 static struct target_waitstatus
2187 btrace_step_no_history (void)
2188 {
2189 struct target_waitstatus status;
2190
2191 status.kind = TARGET_WAITKIND_NO_HISTORY;
2192
2193 return status;
2194 }
2195
2196 /* Return a target_waitstatus indicating that a step finished. */
2197
2198 static struct target_waitstatus
2199 btrace_step_stopped (void)
2200 {
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_STOPPED;
2204 status.value.sig = GDB_SIGNAL_TRAP;
2205
2206 return status;
2207 }
2208
2209 /* Return a target_waitstatus indicating that a thread was stopped as
2210 requested. */
2211
2212 static struct target_waitstatus
2213 btrace_step_stopped_on_request (void)
2214 {
2215 struct target_waitstatus status;
2216
2217 status.kind = TARGET_WAITKIND_STOPPED;
2218 status.value.sig = GDB_SIGNAL_0;
2219
2220 return status;
2221 }
2222
2223 /* Return a target_waitstatus indicating a spurious stop. */
2224
2225 static struct target_waitstatus
2226 btrace_step_spurious (void)
2227 {
2228 struct target_waitstatus status;
2229
2230 status.kind = TARGET_WAITKIND_SPURIOUS;
2231
2232 return status;
2233 }
2234
2235 /* Return a target_waitstatus indicating that the thread was not resumed. */
2236
2237 static struct target_waitstatus
2238 btrace_step_no_resumed (void)
2239 {
2240 struct target_waitstatus status;
2241
2242 status.kind = TARGET_WAITKIND_NO_RESUMED;
2243
2244 return status;
2245 }
2246
2247 /* Return a target_waitstatus indicating that we should wait again. */
2248
2249 static struct target_waitstatus
2250 btrace_step_again (void)
2251 {
2252 struct target_waitstatus status;
2253
2254 status.kind = TARGET_WAITKIND_IGNORE;
2255
2256 return status;
2257 }
2258
2259 /* Clear the record histories. */
2260
2261 static void
2262 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2263 {
2264 xfree (btinfo->insn_history);
2265 xfree (btinfo->call_history);
2266
2267 btinfo->insn_history = NULL;
2268 btinfo->call_history = NULL;
2269 }
2270
2271 /* Check whether TP's current replay position is at a breakpoint. */
2272
2273 static int
2274 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2275 {
2276 struct btrace_insn_iterator *replay;
2277 struct btrace_thread_info *btinfo;
2278 const struct btrace_insn *insn;
2279 struct inferior *inf;
2280
2281 btinfo = &tp->btrace;
2282 replay = btinfo->replay;
2283
2284 if (replay == NULL)
2285 return 0;
2286
2287 insn = btrace_insn_get (replay);
2288 if (insn == NULL)
2289 return 0;
2290
2291 inf = find_inferior_ptid (tp->ptid);
2292 if (inf == NULL)
2293 return 0;
2294
2295 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2296 &btinfo->stop_reason);
2297 }
2298
2299 /* Step one instruction in forward direction. */
2300
2301 static struct target_waitstatus
2302 record_btrace_single_step_forward (struct thread_info *tp)
2303 {
2304 struct btrace_insn_iterator *replay, end, start;
2305 struct btrace_thread_info *btinfo;
2306
2307 btinfo = &tp->btrace;
2308 replay = btinfo->replay;
2309
2310 /* We're done if we're not replaying. */
2311 if (replay == NULL)
2312 return btrace_step_no_history ();
2313
2314 /* Check if we're stepping a breakpoint. */
2315 if (record_btrace_replay_at_breakpoint (tp))
2316 return btrace_step_stopped ();
2317
2318 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2319 jump back to the instruction at which we started. */
2320 start = *replay;
2321 do
2322 {
2323 unsigned int steps;
2324
2325 /* We will bail out here if we continue stepping after reaching the end
2326 of the execution history. */
2327 steps = btrace_insn_next (replay, 1);
2328 if (steps == 0)
2329 {
2330 *replay = start;
2331 return btrace_step_no_history ();
2332 }
2333 }
2334 while (btrace_insn_get (replay) == NULL);
2335
2336 /* Determine the end of the instruction trace. */
2337 btrace_insn_end (&end, btinfo);
2338
2339 /* The execution trace contains (and ends with) the current instruction.
2340 This instruction has not been executed, yet, so the trace really ends
2341 one instruction earlier. */
2342 if (btrace_insn_cmp (replay, &end) == 0)
2343 return btrace_step_no_history ();
2344
2345 return btrace_step_spurious ();
2346 }
2347
/* Step one instruction in backward direction.

   Returns a wait status describing the outcome: no-history when the
   beginning of the trace is reached, stopped when a breakpoint is hit,
   spurious otherwise.  Starts a replay session for TP as a side effect
   if one is not already active.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the saved iterator so a failed step leaves the
	     replay position unchanged.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2394
/* Step a single thread.

   Consumes the pending BTHR_MOVE/BTHR_STOP request from TP's btrace flags
   and performs one step in the requested direction.  For continue-style
   requests the flags are re-armed so the thread keeps moving on the next
   call.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the stepping request; it is restored below where needed.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* Single step forward; a spurious status means the step completed
	 normally and we report a stop.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Single step backward; same reporting convention as BTHR_STEP.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continue forward: re-arm the request and ask to be stepped
	 again unless something noteworthy happened.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      /* Continue backward: mirror image of BTHR_CONT.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2463
/* A vector of threads, used by to_wait below to maintain work lists of
   moving and out-of-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2468
2469 /* Announce further events if necessary. */
2470
2471 static void
2472 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2473 const VEC (tp_t) *no_history)
2474 {
2475 int more_moving, more_no_history;
2476
2477 more_moving = !VEC_empty (tp_t, moving);
2478 more_no_history = !VEC_empty (tp_t, no_history);
2479
2480 if (!more_moving && !more_no_history)
2481 return;
2482
2483 if (more_moving)
2484 DEBUG ("movers pending");
2485
2486 if (more_no_history)
2487 DEBUG ("no-history pending");
2488
2489 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2490 }
2491
/* The to_wait method of target record-btrace.

   When replaying (or executing in reverse), steps all resumed threads
   matching PTID until one of them reports an event, and returns that
   thread's ptid with *STATUS filled in.  Otherwise delegates to the
   target beneath.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  /* Both vectors are freed on every exit path via the cleanup chain.  */
  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move/stop request: report no-resumed.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the no-history list; reported only if
		 no other thread produces an event.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2626
2627 /* The to_stop method of target record-btrace. */
2628
2629 static void
2630 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2631 {
2632 DEBUG ("stop %s", target_pid_to_str (ptid));
2633
2634 /* As long as we're not replaying, just forward the request. */
2635 if ((execution_direction != EXEC_REVERSE)
2636 && !record_btrace_is_replaying (ops, minus_one_ptid))
2637 {
2638 ops = ops->beneath;
2639 ops->to_stop (ops, ptid);
2640 }
2641 else
2642 {
2643 struct thread_info *tp;
2644
2645 ALL_NON_EXITED_THREADS (tp)
2646 if (ptid_match (tp->ptid, ptid))
2647 {
2648 tp->btrace.flags &= ~BTHR_MOVE;
2649 tp->btrace.flags |= BTHR_STOP;
2650 }
2651 }
2652 }
2653
/* The to_can_execute_reverse method of target record-btrace.

   The recorded execution history can always be replayed backwards.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2661
2662 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2663
2664 static int
2665 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2666 {
2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
2668 {
2669 struct thread_info *tp = inferior_thread ();
2670
2671 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2672 }
2673
2674 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2675 }
2676
2677 /* The to_supports_stopped_by_sw_breakpoint method of target
2678 record-btrace. */
2679
2680 static int
2681 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2682 {
2683 if (record_btrace_is_replaying (ops, minus_one_ptid))
2684 return 1;
2685
2686 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2687 }
2688
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  /* During replay, report the stop reason recorded for the current
     thread; otherwise defer to the target beneath.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2703
2704 /* The to_supports_stopped_by_hw_breakpoint method of target
2705 record-btrace. */
2706
2707 static int
2708 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2709 {
2710 if (record_btrace_is_replaying (ops, minus_one_ptid))
2711 return 1;
2712
2713 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2714 }
2715
2716 /* The to_update_thread_list method of target record-btrace. */
2717
2718 static void
2719 record_btrace_update_thread_list (struct target_ops *ops)
2720 {
2721 /* We don't add or remove threads during replay. */
2722 if (record_btrace_is_replaying (ops, minus_one_ptid))
2723 return;
2724
2725 /* Forward the request. */
2726 ops = ops->beneath;
2727 ops->to_update_thread_list (ops);
2728 }
2729
2730 /* The to_thread_alive method of target record-btrace. */
2731
2732 static int
2733 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2734 {
2735 /* We don't add or remove threads during replay. */
2736 if (record_btrace_is_replaying (ops, minus_one_ptid))
2737 return find_thread_ptid (ptid) != NULL;
2738
2739 /* Forward the request. */
2740 ops = ops->beneath;
2741 return ops->to_thread_alive (ops, ptid);
2742 }
2743
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the recorded instruction/function-call histories and reprints
   the current frame, except when the replay position is already at IT
   (in which case nothing changes and we return early).  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; skip the history reset and
	   frame reprint below.  */
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2774
2775 /* The to_goto_record_begin method of target record-btrace. */
2776
2777 static void
2778 record_btrace_goto_begin (struct target_ops *self)
2779 {
2780 struct thread_info *tp;
2781 struct btrace_insn_iterator begin;
2782
2783 tp = require_btrace_thread ();
2784
2785 btrace_insn_begin (&begin, &tp->btrace);
2786
2787 /* Skip gaps at the beginning of the trace. */
2788 while (btrace_insn_get (&begin) == NULL)
2789 {
2790 unsigned int steps;
2791
2792 steps = btrace_insn_next (&begin, 1);
2793 if (steps == 0)
2794 error (_("No trace."));
2795 }
2796
2797 record_btrace_set_replay (tp, &begin);
2798 }
2799
2800 /* The to_goto_record_end method of target record-btrace. */
2801
2802 static void
2803 record_btrace_goto_end (struct target_ops *ops)
2804 {
2805 struct thread_info *tp;
2806
2807 tp = require_btrace_thread ();
2808
2809 record_btrace_set_replay (tp, NULL);
2810 }
2811
2812 /* The to_goto_record method of target record-btrace. */
2813
2814 static void
2815 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2816 {
2817 struct thread_info *tp;
2818 struct btrace_insn_iterator it;
2819 unsigned int number;
2820 int found;
2821
2822 number = insn;
2823
2824 /* Check for wrap-arounds. */
2825 if (number != insn)
2826 error (_("Instruction number out of range."));
2827
2828 tp = require_btrace_thread ();
2829
2830 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2831 if (found == 0)
2832 error (_("No such instruction."));
2833
2834 record_btrace_set_replay (tp, &it);
2835 }
2836
2837 /* The to_record_stop_replaying method of target record-btrace. */
2838
2839 static void
2840 record_btrace_stop_replaying_all (struct target_ops *self)
2841 {
2842 struct thread_info *tp;
2843
2844 ALL_NON_EXITED_THREADS (tp)
2845 record_btrace_stop_replaying (tp);
2846 }
2847
2848 /* The to_execution_direction target method. */
2849
2850 static enum exec_direction_kind
2851 record_btrace_execution_direction (struct target_ops *self)
2852 {
2853 return record_btrace_resume_exec_dir;
2854 }
2855
2856 /* The to_prepare_to_generate_core target method. */
2857
2858 static void
2859 record_btrace_prepare_to_generate_core (struct target_ops *self)
2860 {
2861 record_btrace_generating_corefile = 1;
2862 }
2863
2864 /* The to_done_generating_core target method. */
2865
2866 static void
2867 record_btrace_done_generating_core (struct target_ops *self)
2868 {
2869 record_btrace_generating_corefile = 0;
2870 }
2871
/* Initialize the record-btrace target ops.

   Fills in the record_btrace_ops target vector; any method left unset
   falls through to the target beneath via the target framework.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory, breakpoints, and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2931
/* Start recording in BTS format.

   Sets the requested trace format and pushes the record-btrace target.
   On failure, the format is reset to NONE before re-throwing.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2953
/* Start recording in Intel Processor Trace format.

   Sets the requested trace format and pushes the record-btrace target.
   On failure, the format is reset to NONE before re-throwing.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2975
/* Alias for "target record".

   Tries to start recording in Intel Processor Trace format first and
   falls back to BTS if that fails.  If both attempts fail, the format is
   reset to NONE and the BTS failure is re-thrown.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT is not available; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
3007
/* The "set record btrace" command.

   Invoked without a subcommand; lists the current values of the
   "set record btrace" settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
3015
/* The "show record btrace" command.

   Invoked without a subcommand; lists all "show record btrace"
   sub-settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
3023
3024 /* The "show record btrace replay-memory-access" command. */
3025
3026 static void
3027 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3028 struct cmd_list_element *c, const char *value)
3029 {
3030 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3031 replay_memory_access);
3032 }
3033
/* The "set record btrace bts" command.

   Invoked without a subcommand; prints usage and the list of available
   sub-settings.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
3044
/* The "show record btrace bts" command.

   Invoked without a subcommand; lists all BTS sub-settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3052
/* The "set record btrace pt" command.

   Invoked without a subcommand; prints usage and the list of available
   sub-settings.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3063
/* The "show record btrace pt" command.

   Invoked without a subcommand; lists all PT sub-settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3071
/* The "record bts buffer-size" show value function.

   VALUE is the already-formatted setting string provided by the command
   framework.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3082
/* The "record pt buffer-size" show value function.

   VALUE is the already-formatted setting string provided by the command
   framework.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3093
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family, its set/show options,
   and the record-btrace target itself.  Called once at GDB startup via
   the usual _initialize_* convention.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") starts recording.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* BTS-specific options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT-specific options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache used by the disassembly pretty-printer; see bfcache_* above.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}