1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
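/* Illustrative note (hypothetical COND and CLEANUP): the do/while (0)
   wrapper makes the macro act as a single statement, so

     if (COND)
       DEBUG ("resume");
     else
       CLEANUP ();

   binds as expected.  A bare { ... } body would let the trailing
   semicolon terminate the `if' and orphan the `else'.  */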
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
172 /* Enable automatic tracing of new threads. */
173
174 static void
175 record_btrace_auto_enable (void)
176 {
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181 }
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188 /* The observer may already have been detached. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
198 /* The record-btrace async event handler function. */
199
200 static void
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
202 {
203 inferior_event_handler (INF_REG_EVENT, NULL);
204 }
205
206 /* See record-btrace.h. */
207
208 void
209 record_btrace_push_target (void)
210 {
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 }
225
226 /* The to_open method of target record-btrace. */
227
228 static void
229 record_btrace_open (const char *args, int from_tty)
230 {
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
236 record_preopen ();
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
246 {
247 btrace_enable (tp, &record_btrace_conf);
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
252 record_btrace_push_target ();
253
254 discard_cleanups (disable_chain);
255 }
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
273 /* The to_disconnect method of target record-btrace. */
274
275 static void
276 record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278 {
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286 }
287
288 /* The to_close method of target record-btrace. */
289
290 static void
291 record_btrace_close (struct target_ops *self)
292 {
293 struct thread_info *tp;
294
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
306 }
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
321 /* Adjusts the size and returns a human-readable size suffix. */
322
323 static const char *
324 record_btrace_adjust_size (unsigned int *size)
325 {
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347 }
348
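/* Worked example (illustrative): a buffer size of 65536 is not a
   multiple of 1 MiB but is a multiple of 1 KiB, so the function above
   rewrites it to 64 and returns "kB"; the caller then prints "64kB".
   A size that is not a multiple of 1 KiB is returned unchanged with an
   empty suffix.  */
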
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404 }
405
406 /* The to_info_record method of target record-btrace. */
407
408 static void
409 record_btrace_info (struct target_ops *self)
410 {
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
422 validate_registers_access ();
423
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
428 record_btrace_print_conf (conf);
429
430 btrace_fetch (tp);
431
432 insns = 0;
433 calls = 0;
434 gaps = 0;
435
436 if (!btrace_is_empty (tp))
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
444
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
447
448 /* If the last instruction is not a gap, it is the current instruction,
449 which is not actually part of the record; do not count it. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
452
453 gaps = btinfo->ngaps;
454 }
455
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
463 }
464
465 /* Print a decode error. */
466
467 static void
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470 {
471 const char *errstr;
472 int is_error;
473
474 errstr = _("unknown");
475 is_error = 1;
476
477 switch (format)
478 {
479 default:
480 break;
481
482 case BTRACE_FORMAT_BTS:
483 switch (errcode)
484 {
485 default:
486 break;
487
488 case BDE_BTS_OVERFLOW:
489 errstr = _("instruction overflow");
490 break;
491
492 case BDE_BTS_INSN_SIZE:
493 errstr = _("unknown instruction");
494 break;
495 }
496 break;
497
498 #if defined (HAVE_LIBIPT)
499 case BTRACE_FORMAT_PT:
500 switch (errcode)
501 {
502 case BDE_PT_USER_QUIT:
503 is_error = 0;
504 errstr = _("trace decode cancelled");
505 break;
506
507 case BDE_PT_DISABLED:
508 is_error = 0;
509 errstr = _("disabled");
510 break;
511
512 case BDE_PT_OVERFLOW:
513 is_error = 0;
514 errstr = _("overflow");
515 break;
516
517 default:
518 if (errcode < 0)
519 errstr = pt_errstr (pt_errcode (errcode));
520 break;
521 }
522 break;
523 #endif /* defined (HAVE_LIBIPT) */
524 }
525
526 uiout->text (_("["));
527 if (is_error)
528 {
529 uiout->text (_("decode error ("));
530 uiout->field_int ("errcode", errcode);
531 uiout->text (_("): "));
532 }
533 uiout->text (errstr);
534 uiout->text (_("]\n"));
535 }
536
537 /* Print an unsigned int. */
538
539 static void
540 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
541 {
542 uiout->field_fmt (fld, "%u", val);
543 }
544
545 /* A range of source lines. */
546
547 struct btrace_line_range
548 {
549 /* The symtab this line is from. */
550 struct symtab *symtab;
551
552 /* The first line (inclusive). */
553 int begin;
554
555 /* The last line (exclusive). */
556 int end;
557 };
558
559 /* Construct a line range. */
560
561 static struct btrace_line_range
562 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
563 {
564 struct btrace_line_range range;
565
566 range.symtab = symtab;
567 range.begin = begin;
568 range.end = end;
569
570 return range;
571 }
572
573 /* Add a line to a line range. */
574
575 static struct btrace_line_range
576 btrace_line_range_add (struct btrace_line_range range, int line)
577 {
578 if (range.end <= range.begin)
579 {
580 /* This is the first entry. */
581 range.begin = line;
582 range.end = line + 1;
583 }
584 else if (line < range.begin)
585 range.begin = line;
586 else if (range.end < line)
587 range.end = line;
588
589 return range;
590 }
591
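/* Illustrative example: starting from the empty range [0; 0), adding
   line 42 yields [42; 43); adding line 40 afterwards extends it to
   [40; 43).  The end stays exclusive, matching the containment check
   below.  */
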
592 /* Return non-zero if RANGE is empty, zero otherwise. */
593
594 static int
595 btrace_line_range_is_empty (struct btrace_line_range range)
596 {
597 return range.end <= range.begin;
598 }
599
600 /* Return non-zero if LHS contains RHS, zero otherwise. */
601
602 static int
603 btrace_line_range_contains_range (struct btrace_line_range lhs,
604 struct btrace_line_range rhs)
605 {
606 return ((lhs.symtab == rhs.symtab)
607 && (lhs.begin <= rhs.begin)
608 && (rhs.end <= lhs.end));
609 }
610
611 /* Find the line range associated with PC. */
612
613 static struct btrace_line_range
614 btrace_find_line_range (CORE_ADDR pc)
615 {
616 struct btrace_line_range range;
617 struct linetable_entry *lines;
618 struct linetable *ltable;
619 struct symtab *symtab;
620 int nlines, i;
621
622 symtab = find_pc_line_symtab (pc);
623 if (symtab == NULL)
624 return btrace_mk_line_range (NULL, 0, 0);
625
626 ltable = SYMTAB_LINETABLE (symtab);
627 if (ltable == NULL)
628 return btrace_mk_line_range (symtab, 0, 0);
629
630 nlines = ltable->nitems;
631 lines = ltable->item;
632 if (nlines <= 0)
633 return btrace_mk_line_range (symtab, 0, 0);
634
635 range = btrace_mk_line_range (symtab, 0, 0);
636 for (i = 0; i < nlines - 1; i++)
637 {
638 if ((lines[i].pc == pc) && (lines[i].line != 0))
639 range = btrace_line_range_add (range, lines[i].line);
640 }
641
642 return range;
643 }
644
645 /* Print source lines in LINES to UIOUT.
646
647 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
648 instructions corresponding to that source line. When printing a new source
649 line, we do the cleanups for the open chain and open a new cleanup chain for
650 the new source line. If the source line range in LINES is not empty, this
651 function will leave the cleanup chain for the last printed source line open
652 so instructions can be added to it. */
653
654 static void
655 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
656 struct cleanup **ui_item_chain, int flags)
657 {
658 print_source_lines_flags psl_flags;
659 int line;
660
661 psl_flags = 0;
662 if (flags & DISASSEMBLY_FILENAME)
663 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
664
665 for (line = lines.begin; line < lines.end; ++line)
666 {
667 if (*ui_item_chain != NULL)
668 do_cleanups (*ui_item_chain);
669
670 *ui_item_chain
671 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
672
673 print_source_lines (lines.symtab, line, line + 1, psl_flags);
674
675 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
676 }
677 }
678
679 /* Disassemble a section of the recorded instruction trace. */
680
681 static void
682 btrace_insn_history (struct ui_out *uiout,
683 const struct btrace_thread_info *btinfo,
684 const struct btrace_insn_iterator *begin,
685 const struct btrace_insn_iterator *end, int flags)
686 {
687 struct cleanup *cleanups, *ui_item_chain;
688 struct gdbarch *gdbarch;
689 struct btrace_insn_iterator it;
690 struct btrace_line_range last_lines;
691
692 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
693 btrace_insn_number (end));
694
695 flags |= DISASSEMBLY_SPECULATIVE;
696
697 gdbarch = target_gdbarch ();
698 last_lines = btrace_mk_line_range (NULL, 0, 0);
699
700 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
701
702 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
703 instructions corresponding to that line. */
704 ui_item_chain = NULL;
705
706 gdb_pretty_print_disassembler disasm (gdbarch);
707
708 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
709 {
710 const struct btrace_insn *insn;
711
712 insn = btrace_insn_get (&it);
713
714 /* A NULL instruction indicates a gap in the trace. */
715 if (insn == NULL)
716 {
717 const struct btrace_config *conf;
718
719 conf = btrace_conf (btinfo);
720
721 /* We have trace so we must have a configuration. */
722 gdb_assert (conf != NULL);
723
724 uiout->field_fmt ("insn-number", "%u",
725 btrace_insn_number (&it));
726 uiout->text ("\t");
727
728 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
729 conf->format);
730 }
731 else
732 {
733 struct disasm_insn dinsn;
734
735 if ((flags & DISASSEMBLY_SOURCE) != 0)
736 {
737 struct btrace_line_range lines;
738
739 lines = btrace_find_line_range (insn->pc);
740 if (!btrace_line_range_is_empty (lines)
741 && !btrace_line_range_contains_range (last_lines, lines))
742 {
743 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
744 last_lines = lines;
745 }
746 else if (ui_item_chain == NULL)
747 {
748 ui_item_chain
749 = make_cleanup_ui_out_tuple_begin_end (uiout,
750 "src_and_asm_line");
751 /* No source information. */
752 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
753 }
754
755 gdb_assert (ui_item_chain != NULL);
756 }
757
758 memset (&dinsn, 0, sizeof (dinsn));
759 dinsn.number = btrace_insn_number (&it);
760 dinsn.addr = insn->pc;
761
762 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
763 dinsn.is_speculative = 1;
764
765 disasm.pretty_print_insn (uiout, &dinsn, flags);
766 }
767 }
768
769 do_cleanups (cleanups);
770 }
771
772 /* The to_insn_history method of target record-btrace. */
773
774 static void
775 record_btrace_insn_history (struct target_ops *self, int size, int flags)
776 {
777 struct btrace_thread_info *btinfo;
778 struct btrace_insn_history *history;
779 struct btrace_insn_iterator begin, end;
780 struct cleanup *uiout_cleanup;
781 struct ui_out *uiout;
782 unsigned int context, covered;
783
784 uiout = current_uiout;
785 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
786 "insn history");
787 context = abs (size);
788 if (context == 0)
789 error (_("Bad record instruction-history-size."));
790
791 btinfo = require_btrace ();
792 history = btinfo->insn_history;
793 if (history == NULL)
794 {
795 struct btrace_insn_iterator *replay;
796
797 DEBUG ("insn-history (0x%x): %d", flags, size);
798
799 /* If we're replaying, we start at the replay position. Otherwise, we
800 start at the tail of the trace. */
801 replay = btinfo->replay;
802 if (replay != NULL)
803 begin = *replay;
804 else
805 btrace_insn_end (&begin, btinfo);
806
807 /* We start from here and expand in the requested direction. Then we
808 expand in the other direction, as well, to fill up any remaining
809 context. */
810 end = begin;
811 if (size < 0)
812 {
813 /* We want the current position covered, as well. */
814 covered = btrace_insn_next (&end, 1);
815 covered += btrace_insn_prev (&begin, context - covered);
816 covered += btrace_insn_next (&end, context - covered);
817 }
818 else
819 {
820 covered = btrace_insn_next (&end, context);
821 covered += btrace_insn_prev (&begin, context - covered);
822 }
823 }
824 else
825 {
826 begin = history->begin;
827 end = history->end;
828
829 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
830 btrace_insn_number (&begin), btrace_insn_number (&end));
831
832 if (size < 0)
833 {
834 end = begin;
835 covered = btrace_insn_prev (&begin, context);
836 }
837 else
838 {
839 begin = end;
840 covered = btrace_insn_next (&end, context);
841 }
842 }
843
844 if (covered > 0)
845 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
846 else
847 {
848 if (size < 0)
849 printf_unfiltered (_("At the start of the branch trace record.\n"));
850 else
851 printf_unfiltered (_("At the end of the branch trace record.\n"));
852 }
853
854 btrace_set_insn_history (btinfo, &begin, &end);
855 do_cleanups (uiout_cleanup);
856 }
857
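/* Worked example (illustrative): with a context of 20 and a negative
   SIZE while replaying at instruction 100, the code above first covers
   the current instruction (END moves to 101), then expands BEGIN
   backwards, requesting [81; 101) -- the twenty instructions ending at
   the replay position.  */
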
858 /* The to_insn_history_range method of target record-btrace. */
859
860 static void
861 record_btrace_insn_history_range (struct target_ops *self,
862 ULONGEST from, ULONGEST to, int flags)
863 {
864 struct btrace_thread_info *btinfo;
865 struct btrace_insn_history *history;
866 struct btrace_insn_iterator begin, end;
867 struct cleanup *uiout_cleanup;
868 struct ui_out *uiout;
869 unsigned int low, high;
870 int found;
871
872 uiout = current_uiout;
873 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
874 "insn history");
875 low = from;
876 high = to;
877
878 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
879
880 /* Check for wrap-arounds. */
881 if (low != from || high != to)
882 error (_("Bad range."));
883
884 if (high < low)
885 error (_("Bad range."));
886
887 btinfo = require_btrace ();
888
889 found = btrace_find_insn_by_number (&begin, btinfo, low);
890 if (found == 0)
891 error (_("Range out of bounds."));
892
893 found = btrace_find_insn_by_number (&end, btinfo, high);
894 if (found == 0)
895 {
896 /* Silently truncate the range. */
897 btrace_insn_end (&end, btinfo);
898 }
899 else
900 {
901 /* We want both begin and end to be inclusive. */
902 btrace_insn_next (&end, 1);
903 }
904
905 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
906 btrace_set_insn_history (btinfo, &begin, &end);
907
908 do_cleanups (uiout_cleanup);
909 }
910
911 /* The to_insn_history_from method of target record-btrace. */
912
913 static void
914 record_btrace_insn_history_from (struct target_ops *self,
915 ULONGEST from, int size, int flags)
916 {
917 ULONGEST begin, end, context;
918
919 context = abs (size);
920 if (context == 0)
921 error (_("Bad record instruction-history-size."));
922
923 if (size < 0)
924 {
925 end = from;
926
927 if (from < context)
928 begin = 0;
929 else
930 begin = from - context + 1;
931 }
932 else
933 {
934 begin = from;
935 end = from + context - 1;
936
937 /* Check for wrap-around. */
938 if (end < begin)
939 end = ULONGEST_MAX;
940 }
941
942 record_btrace_insn_history_range (self, begin, end, flags);
943 }
944
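/* Worked example (illustrative): FROM = 100 and SIZE = -10 give
   CONTEXT = 10, END = 100, and BEGIN = 100 - 10 + 1 = 91: the ten
   instructions up to and including number 100.  For SIZE = 10, the
   range is [100; 109], clamped at ULONGEST_MAX on wrap-around.  */
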
945 /* Print the instruction number range for a function call history line. */
946
947 static void
948 btrace_call_history_insn_range (struct ui_out *uiout,
949 const struct btrace_function *bfun)
950 {
951 unsigned int begin, end, size;
952
953 size = VEC_length (btrace_insn_s, bfun->insn);
954 gdb_assert (size > 0);
955
956 begin = bfun->insn_offset;
957 end = begin + size - 1;
958
959 ui_out_field_uint (uiout, "insn begin", begin);
960 uiout->text (",");
961 ui_out_field_uint (uiout, "insn end", end);
962 }
963
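/* Illustrative example: a function segment with INSN_OFFSET = 10 that
   contains three instructions prints "10,12"; both bounds are
   inclusive.  */
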
964 /* Compute the lowest and highest source line for the instructions in BFUN
965 and return them in PBEGIN and PEND.
966 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
967 result from inlining or macro expansion. */
968
969 static void
970 btrace_compute_src_line_range (const struct btrace_function *bfun,
971 int *pbegin, int *pend)
972 {
973 struct btrace_insn *insn;
974 struct symtab *symtab;
975 struct symbol *sym;
976 unsigned int idx;
977 int begin, end;
978
979 begin = INT_MAX;
980 end = INT_MIN;
981
982 sym = bfun->sym;
983 if (sym == NULL)
984 goto out;
985
986 symtab = symbol_symtab (sym);
987
988 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
989 {
990 struct symtab_and_line sal;
991
992 sal = find_pc_line (insn->pc, 0);
993 if (sal.symtab != symtab || sal.line == 0)
994 continue;
995
996 begin = std::min (begin, sal.line);
997 end = std::max (end, sal.line);
998 }
999
1000 out:
1001 *pbegin = begin;
1002 *pend = end;
1003 }
1004
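/* Note (illustrative): the INT_MAX/INT_MIN initialization acts as an
   "empty" sentinel.  If no instruction maps to BFUN's symtab, *PBEGIN
   ends up as INT_MAX and *PEND as INT_MIN, and callers detect this via
   END < BEGIN, as btrace_call_history_src_line does below.  */
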
1005 /* Print the source line information for a function call history line. */
1006
1007 static void
1008 btrace_call_history_src_line (struct ui_out *uiout,
1009 const struct btrace_function *bfun)
1010 {
1011 struct symbol *sym;
1012 int begin, end;
1013
1014 sym = bfun->sym;
1015 if (sym == NULL)
1016 return;
1017
1018 uiout->field_string ("file",
1019 symtab_to_filename_for_display (symbol_symtab (sym)));
1020
1021 btrace_compute_src_line_range (bfun, &begin, &end);
1022 if (end < begin)
1023 return;
1024
1025 uiout->text (":");
1026 uiout->field_int ("min line", begin);
1027
1028 if (end == begin)
1029 return;
1030
1031 uiout->text (",");
1032 uiout->field_int ("max line", end);
1033 }
1034
1035 /* Get the name of a branch trace function. */
1036
1037 static const char *
1038 btrace_get_bfun_name (const struct btrace_function *bfun)
1039 {
1040 struct minimal_symbol *msym;
1041 struct symbol *sym;
1042
1043 if (bfun == NULL)
1044 return "??";
1045
1046 msym = bfun->msym;
1047 sym = bfun->sym;
1048
1049 if (sym != NULL)
1050 return SYMBOL_PRINT_NAME (sym);
1051 else if (msym != NULL)
1052 return MSYMBOL_PRINT_NAME (msym);
1053 else
1054 return "??";
1055 }
1056
1057 /* Disassemble a section of the recorded function trace. */
1058
1059 static void
1060 btrace_call_history (struct ui_out *uiout,
1061 const struct btrace_thread_info *btinfo,
1062 const struct btrace_call_iterator *begin,
1063 const struct btrace_call_iterator *end,
1064 int int_flags)
1065 {
1066 struct btrace_call_iterator it;
1067 record_print_flags flags = (enum record_print_flag) int_flags;
1068
1069 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1070 btrace_call_number (end));
1071
1072 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1073 {
1074 const struct btrace_function *bfun;
1075 struct minimal_symbol *msym;
1076 struct symbol *sym;
1077
1078 bfun = btrace_call_get (&it);
1079 sym = bfun->sym;
1080 msym = bfun->msym;
1081
1082 /* Print the function index. */
1083 ui_out_field_uint (uiout, "index", bfun->number);
1084 uiout->text ("\t");
1085
1086 /* Indicate gaps in the trace. */
1087 if (bfun->errcode != 0)
1088 {
1089 const struct btrace_config *conf;
1090
1091 conf = btrace_conf (btinfo);
1092
1093 /* We have trace so we must have a configuration. */
1094 gdb_assert (conf != NULL);
1095
1096 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1097
1098 continue;
1099 }
1100
1101 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1102 {
1103 int level = bfun->level + btinfo->level, i;
1104
1105 for (i = 0; i < level; ++i)
1106 uiout->text (" ");
1107 }
1108
1109 if (sym != NULL)
1110 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1111 else if (msym != NULL)
1112 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1113 else if (!uiout->is_mi_like_p ())
1114 uiout->field_string ("function", "??");
1115
1116 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1117 {
1118 uiout->text (_("\tinst "));
1119 btrace_call_history_insn_range (uiout, bfun);
1120 }
1121
1122 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1123 {
1124 uiout->text (_("\tat "));
1125 btrace_call_history_src_line (uiout, bfun);
1126 }
1127
1128 uiout->text ("\n");
1129 }
1130 }
1131
1132 /* The to_call_history method of target record-btrace. */
1133
1134 static void
1135 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1136 {
1137 struct btrace_thread_info *btinfo;
1138 struct btrace_call_history *history;
1139 struct btrace_call_iterator begin, end;
1140 struct cleanup *uiout_cleanup;
1141 struct ui_out *uiout;
1142 unsigned int context, covered;
1143 record_print_flags flags = (enum record_print_flag) int_flags;
1144
1145 uiout = current_uiout;
1146 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1147 "insn history");
1148 context = abs (size);
1149 if (context == 0)
1150 error (_("Bad record function-call-history-size."));
1151
1152 btinfo = require_btrace ();
1153 history = btinfo->call_history;
1154 if (history == NULL)
1155 {
1156 struct btrace_insn_iterator *replay;
1157
1158 DEBUG ("call-history (0x%x): %d", int_flags, size);
1159
1160 /* If we're replaying, we start at the replay position. Otherwise, we
1161 start at the tail of the trace. */
1162 replay = btinfo->replay;
1163 if (replay != NULL)
1164 {
1165 begin.function = replay->function;
1166 begin.btinfo = btinfo;
1167 }
1168 else
1169 btrace_call_end (&begin, btinfo);
1170
1171 /* We start from here and expand in the requested direction. Then we
1172 expand in the other direction, as well, to fill up any remaining
1173 context. */
1174 end = begin;
1175 if (size < 0)
1176 {
1177 /* We want the current position covered, as well. */
1178 covered = btrace_call_next (&end, 1);
1179 covered += btrace_call_prev (&begin, context - covered);
1180 covered += btrace_call_next (&end, context - covered);
1181 }
1182 else
1183 {
1184 covered = btrace_call_next (&end, context);
1185 covered += btrace_call_prev (&begin, context - covered);
1186 }
1187 }
1188 else
1189 {
1190 begin = history->begin;
1191 end = history->end;
1192
1193 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1194 btrace_call_number (&begin), btrace_call_number (&end));
1195
1196 if (size < 0)
1197 {
1198 end = begin;
1199 covered = btrace_call_prev (&begin, context);
1200 }
1201 else
1202 {
1203 begin = end;
1204 covered = btrace_call_next (&end, context);
1205 }
1206 }
1207
1208 if (covered > 0)
1209 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1210 else
1211 {
1212 if (size < 0)
1213 printf_unfiltered (_("At the start of the branch trace record.\n"));
1214 else
1215 printf_unfiltered (_("At the end of the branch trace record.\n"));
1216 }
1217
1218 btrace_set_call_history (btinfo, &begin, &end);
1219 do_cleanups (uiout_cleanup);
1220 }
1221
1222 /* The to_call_history_range method of target record-btrace. */
1223
1224 static void
1225 record_btrace_call_history_range (struct target_ops *self,
1226 ULONGEST from, ULONGEST to,
1227 int int_flags)
1228 {
1229 struct btrace_thread_info *btinfo;
1230 struct btrace_call_history *history;
1231 struct btrace_call_iterator begin, end;
1232 struct cleanup *uiout_cleanup;
1233 struct ui_out *uiout;
1234 unsigned int low, high;
1235 int found;
1236 record_print_flags flags = (enum record_print_flag) int_flags;
1237
1238 uiout = current_uiout;
1239 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1240 "func history");
1241 low = from;
1242 high = to;
1243
1244 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1245
1246 /* Check for wrap-arounds. */
1247 if (low != from || high != to)
1248 error (_("Bad range."));
1249
1250 if (high < low)
1251 error (_("Bad range."));
1252
1253 btinfo = require_btrace ();
1254
1255 found = btrace_find_call_by_number (&begin, btinfo, low);
1256 if (found == 0)
1257 error (_("Range out of bounds."));
1258
1259 found = btrace_find_call_by_number (&end, btinfo, high);
1260 if (found == 0)
1261 {
1262 /* Silently truncate the range. */
1263 btrace_call_end (&end, btinfo);
1264 }
1265 else
1266 {
1267 /* We want both begin and end to be inclusive. */
1268 btrace_call_next (&end, 1);
1269 }
1270
1271 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1272 btrace_set_call_history (btinfo, &begin, &end);
1273
1274 do_cleanups (uiout_cleanup);
1275 }
1276
1277 /* The to_call_history_from method of target record-btrace. */
1278
1279 static void
1280 record_btrace_call_history_from (struct target_ops *self,
1281 ULONGEST from, int size,
1282 int int_flags)
1283 {
1284 ULONGEST begin, end, context;
1285 record_print_flags flags = (enum record_print_flag) int_flags;
1286
1287 context = abs (size);
1288 if (context == 0)
1289 error (_("Bad record function-call-history-size."));
1290
1291 if (size < 0)
1292 {
1293 end = from;
1294
1295 if (from < context)
1296 begin = 0;
1297 else
1298 begin = from - context + 1;
1299 }
1300 else
1301 {
1302 begin = from;
1303 end = from + context - 1;
1304
1305 /* Check for wrap-around. */
1306 if (end < begin)
1307 end = ULONGEST_MAX;
1308 }
1309
1310 record_btrace_call_history_range (self, begin, end, flags);
1311 }
1312
1313 /* The to_record_is_replaying method of target record-btrace. */
1314
1315 static int
1316 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1317 {
1318 struct thread_info *tp;
1319
1320 ALL_NON_EXITED_THREADS (tp)
1321 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1322 return 1;
1323
1324 return 0;
1325 }
1326
1327 /* The to_record_will_replay method of target record-btrace. */
1328
1329 static int
1330 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1331 {
1332 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1333 }
1334
1335 /* The to_xfer_partial method of target record-btrace. */
1336
1337 static enum target_xfer_status
1338 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1339 const char *annex, gdb_byte *readbuf,
1340 const gdb_byte *writebuf, ULONGEST offset,
1341 ULONGEST len, ULONGEST *xfered_len)
1342 {
1345 /* Filter out requests that don't make sense during replay. */
1346 if (replay_memory_access == replay_memory_access_read_only
1347 && !record_btrace_generating_corefile
1348 && record_btrace_is_replaying (ops, inferior_ptid))
1349 {
1350 switch (object)
1351 {
1352 case TARGET_OBJECT_MEMORY:
1353 {
1354 struct target_section *section;
1355
1356 /* We do not allow writing memory in general. */
1357 if (writebuf != NULL)
1358 {
1359 *xfered_len = len;
1360 return TARGET_XFER_UNAVAILABLE;
1361 }
1362
1363 /* We allow reading readonly memory. */
1364 section = target_section_by_addr (ops, offset);
1365 if (section != NULL)
1366 {
1367 /* Check if the section we found is readonly. */
1368 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1369 section->the_bfd_section)
1370 & SEC_READONLY) != 0)
1371 {
1372 /* Truncate the request to fit into this section. */
1373 len = std::min (len, section->endaddr - offset);
1374 break;
1375 }
1376 }
1377
1378 *xfered_len = len;
1379 return TARGET_XFER_UNAVAILABLE;
1380 }
1381 }
1382 }
1383
1384 /* Forward the request. */
1385 ops = ops->beneath;
1386 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1387 offset, len, xfered_len);
1388 }
1389
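/* Illustrative example: while replaying with the default read-only
   setting, a read from a read-only section succeeds (truncated to that
   section), whereas reads from writable memory and all writes yield
   TARGET_XFER_UNAVAILABLE, since the recorded contents may differ from
   the current contents.  "set record btrace replay-memory-access
   read-write" lifts this restriction.  */
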
1390 /* The to_insert_breakpoint method of target record-btrace. */
1391
1392 static int
1393 record_btrace_insert_breakpoint (struct target_ops *ops,
1394 struct gdbarch *gdbarch,
1395 struct bp_target_info *bp_tgt)
1396 {
1397 const char *old;
1398 int ret;
1399
1400 /* Inserting breakpoints requires accessing memory. Allow it for the
1401 duration of this function. */
1402 old = replay_memory_access;
1403 replay_memory_access = replay_memory_access_read_write;
1404
1405 ret = 0;
1406 TRY
1407 {
1408 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1409 }
1410 CATCH (except, RETURN_MASK_ALL)
1411 {
1412 replay_memory_access = old;
1413 throw_exception (except);
1414 }
1415 END_CATCH
1416 replay_memory_access = old;
1417
1418 return ret;
1419 }
1420
1421 /* The to_remove_breakpoint method of target record-btrace. */
1422
1423 static int
1424 record_btrace_remove_breakpoint (struct target_ops *ops,
1425 struct gdbarch *gdbarch,
1426 struct bp_target_info *bp_tgt,
1427 enum remove_bp_reason reason)
1428 {
1429 const char *old;
1430 int ret;
1431
1432 /* Removing breakpoints requires accessing memory. Allow it for the
1433 duration of this function. */
1434 old = replay_memory_access;
1435 replay_memory_access = replay_memory_access_read_write;
1436
1437 ret = 0;
1438 TRY
1439 {
1440 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1441 reason);
1442 }
1443 CATCH (except, RETURN_MASK_ALL)
1444 {
1445 replay_memory_access = old;
1446 throw_exception (except);
1447 }
1448 END_CATCH
1449 replay_memory_access = old;
1450
1451 return ret;
1452 }
1453
1454 /* The to_fetch_registers method of target record-btrace. */
1455
1456 static void
1457 record_btrace_fetch_registers (struct target_ops *ops,
1458 struct regcache *regcache, int regno)
1459 {
1460 struct btrace_insn_iterator *replay;
1461 struct thread_info *tp;
1462
1463 tp = find_thread_ptid (inferior_ptid);
1464 gdb_assert (tp != NULL);
1465
1466 replay = tp->btrace.replay;
1467 if (replay != NULL && !record_btrace_generating_corefile)
1468 {
1469 const struct btrace_insn *insn;
1470 struct gdbarch *gdbarch;
1471 int pcreg;
1472
1473 gdbarch = get_regcache_arch (regcache);
1474 pcreg = gdbarch_pc_regnum (gdbarch);
1475 if (pcreg < 0)
1476 return;
1477
1478 /* We can only provide the PC register. */
1479 if (regno >= 0 && regno != pcreg)
1480 return;
1481
1482 insn = btrace_insn_get (replay);
1483 gdb_assert (insn != NULL);
1484
1485 regcache_raw_supply (regcache, regno, &insn->pc);
1486 }
1487 else
1488 {
1489 struct target_ops *t = ops->beneath;
1490
1491 t->to_fetch_registers (t, regcache, regno);
1492 }
1493 }
1494
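/* Note (illustrative): while replaying, only the PC is available from
   the trace; a request for any other register returns above without
   supplying a value, leaving it unavailable.  */
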
1495 /* The to_store_registers method of target record-btrace. */
1496
1497 static void
1498 record_btrace_store_registers (struct target_ops *ops,
1499 struct regcache *regcache, int regno)
1500 {
1501 struct target_ops *t;
1502
1503 if (!record_btrace_generating_corefile
1504 && record_btrace_is_replaying (ops, inferior_ptid))
1505 error (_("Cannot write registers while replaying."));
1506
1507 gdb_assert (may_write_registers != 0);
1508
1509 t = ops->beneath;
1510 t->to_store_registers (t, regcache, regno);
1511 }
1512
1513 /* The to_prepare_to_store method of target record-btrace. */
1514
1515 static void
1516 record_btrace_prepare_to_store (struct target_ops *ops,
1517 struct regcache *regcache)
1518 {
1519 struct target_ops *t;
1520
1521 if (!record_btrace_generating_corefile
1522 && record_btrace_is_replaying (ops, inferior_ptid))
1523 return;
1524
1525 t = ops->beneath;
1526 t->to_prepare_to_store (t, regcache);
1527 }
1528
1529 /* The branch trace frame cache. */
1530
1531 struct btrace_frame_cache
1532 {
1533 /* The thread. */
1534 struct thread_info *tp;
1535
1536 /* The frame info. */
1537 struct frame_info *frame;
1538
1539 /* The branch trace function segment. */
1540 const struct btrace_function *bfun;
1541 };
1542
1543 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1544
1545 static htab_t bfcache;
1546
1547 /* hash_f for htab_create_alloc of bfcache. */
1548
1549 static hashval_t
1550 bfcache_hash (const void *arg)
1551 {
1552 const struct btrace_frame_cache *cache
1553 = (const struct btrace_frame_cache *) arg;
1554
1555 return htab_hash_pointer (cache->frame);
1556 }
1557
1558 /* eq_f for htab_create_alloc of bfcache. */
1559
1560 static int
1561 bfcache_eq (const void *arg1, const void *arg2)
1562 {
1563 const struct btrace_frame_cache *cache1
1564 = (const struct btrace_frame_cache *) arg1;
1565 const struct btrace_frame_cache *cache2
1566 = (const struct btrace_frame_cache *) arg2;
1567
1568 return cache1->frame == cache2->frame;
1569 }
1570
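/* Usage sketch (illustrative): since hashing and equality only inspect
   the FRAME pointer, lookups build a pattern entry on the stack:

     struct btrace_frame_cache pattern;
     pattern.frame = frame;
     slot = htab_find_slot (bfcache, &pattern, NO_INSERT);

   as done in btrace_get_frame_function below.  */
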
1571 /* Create a new btrace frame cache. */
1572
1573 static struct btrace_frame_cache *
1574 bfcache_new (struct frame_info *frame)
1575 {
1576 struct btrace_frame_cache *cache;
1577 void **slot;
1578
1579 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1580 cache->frame = frame;
1581
1582 slot = htab_find_slot (bfcache, cache, INSERT);
1583 gdb_assert (*slot == NULL);
1584 *slot = cache;
1585
1586 return cache;
1587 }
1588
1589 /* Extract the branch trace function from a branch trace frame. */
1590
1591 static const struct btrace_function *
1592 btrace_get_frame_function (struct frame_info *frame)
1593 {
1594 const struct btrace_frame_cache *cache;
1596 struct btrace_frame_cache pattern;
1597 void **slot;
1598
1599 pattern.frame = frame;
1600
1601 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1602 if (slot == NULL)
1603 return NULL;
1604
1605 cache = (const struct btrace_frame_cache *) *slot;
1606 return cache->bfun;
1607 }
1608
1609 /* Implement stop_reason method for record_btrace_frame_unwind. */
1610
1611 static enum unwind_stop_reason
1612 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1613 void **this_cache)
1614 {
1615 const struct btrace_frame_cache *cache;
1616 const struct btrace_function *bfun;
1617
1618 cache = (const struct btrace_frame_cache *) *this_cache;
1619 bfun = cache->bfun;
1620 gdb_assert (bfun != NULL);
1621
1622 if (bfun->up == NULL)
1623 return UNWIND_UNAVAILABLE;
1624
1625 return UNWIND_NO_REASON;
1626 }
1627
1628 /* Implement this_id method for record_btrace_frame_unwind. */
1629
1630 static void
1631 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1632 struct frame_id *this_id)
1633 {
1634 const struct btrace_frame_cache *cache;
1635 const struct btrace_function *bfun;
1636 CORE_ADDR code, special;
1637
1638 cache = (const struct btrace_frame_cache *) *this_cache;
1639
1640 bfun = cache->bfun;
1641 gdb_assert (bfun != NULL);
1642
1643 while (bfun->segment.prev != NULL)
1644 bfun = bfun->segment.prev;
1645
1646 code = get_frame_func (this_frame);
1647 special = bfun->number;
1648
1649 *this_id = frame_id_build_unavailable_stack_special (code, special);
1650
1651 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1652 btrace_get_bfun_name (cache->bfun),
1653 core_addr_to_string_nz (this_id->code_addr),
1654 core_addr_to_string_nz (this_id->special_addr));
1655 }
1656
1657 /* Implement prev_register method for record_btrace_frame_unwind. */
1658
1659 static struct value *
1660 record_btrace_frame_prev_register (struct frame_info *this_frame,
1661 void **this_cache,
1662 int regnum)
1663 {
1664 const struct btrace_frame_cache *cache;
1665 const struct btrace_function *bfun, *caller;
1666 const struct btrace_insn *insn;
1667 struct gdbarch *gdbarch;
1668 CORE_ADDR pc;
1669 int pcreg;
1670
1671 gdbarch = get_frame_arch (this_frame);
1672 pcreg = gdbarch_pc_regnum (gdbarch);
1673 if (pcreg < 0 || regnum != pcreg)
1674 throw_error (NOT_AVAILABLE_ERROR,
1675 _("Registers are not available in btrace record history"));
1676
1677 cache = (const struct btrace_frame_cache *) *this_cache;
1678 bfun = cache->bfun;
1679 gdb_assert (bfun != NULL);
1680
1681 caller = bfun->up;
1682 if (caller == NULL)
1683 throw_error (NOT_AVAILABLE_ERROR,
1684 _("No caller in btrace record history"));
1685
1686 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1687 {
1688 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1689 pc = insn->pc;
1690 }
1691 else
1692 {
1693 insn = VEC_last (btrace_insn_s, caller->insn);
1694 pc = insn->pc;
1695
1696 pc += gdb_insn_length (gdbarch, pc);
1697 }
1698
1699 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1700 btrace_get_bfun_name (bfun), bfun->level,
1701 core_addr_to_string_nz (pc));
1702
1703 return frame_unwind_got_address (this_frame, regnum, pc);
1704 }
1705
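/* Illustrative example: if foo calls bar with a 5-byte call instruction
   at PC 0x1000, unwinding bar's frame yields PC 0x1005 in foo -- the
   caller's last recorded instruction plus its length.  If the up link
   was created by a return (BFUN_UP_LINKS_TO_RET), the first instruction
   of the up segment is used instead.  */
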
1706 /* Implement sniffer method for record_btrace_frame_unwind. */
1707
1708 static int
1709 record_btrace_frame_sniffer (const struct frame_unwind *self,
1710 struct frame_info *this_frame,
1711 void **this_cache)
1712 {
1713 const struct btrace_function *bfun;
1714 struct btrace_frame_cache *cache;
1715 struct thread_info *tp;
1716 struct frame_info *next;
1717
1718 /* THIS_FRAME does not contain a reference to its thread. */
1719 tp = find_thread_ptid (inferior_ptid);
1720 gdb_assert (tp != NULL);
1721
1722 bfun = NULL;
1723 next = get_next_frame (this_frame);
1724 if (next == NULL)
1725 {
1726 const struct btrace_insn_iterator *replay;
1727
1728 replay = tp->btrace.replay;
1729 if (replay != NULL)
1730 bfun = replay->function;
1731 }
1732 else
1733 {
1734 const struct btrace_function *callee;
1735
1736 callee = btrace_get_frame_function (next);
1737 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1738 bfun = callee->up;
1739 }
1740
1741 if (bfun == NULL)
1742 return 0;
1743
1744 DEBUG ("[frame] sniffed frame for %s on level %d",
1745 btrace_get_bfun_name (bfun), bfun->level);
1746
1747 /* This is our frame. Initialize the frame cache. */
1748 cache = bfcache_new (this_frame);
1749 cache->tp = tp;
1750 cache->bfun = bfun;
1751
1752 *this_cache = cache;
1753 return 1;
1754 }
1755
1756 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1757
1758 static int
1759 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1760 struct frame_info *this_frame,
1761 void **this_cache)
1762 {
1763 const struct btrace_function *bfun, *callee;
1764 struct btrace_frame_cache *cache;
1765 struct frame_info *next;
1766
1767 next = get_next_frame (this_frame);
1768 if (next == NULL)
1769 return 0;
1770
1771 callee = btrace_get_frame_function (next);
1772 if (callee == NULL)
1773 return 0;
1774
1775 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1776 return 0;
1777
1778 bfun = callee->up;
1779 if (bfun == NULL)
1780 return 0;
1781
1782 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1783 btrace_get_bfun_name (bfun), bfun->level);
1784
1785 /* This is our frame. Initialize the frame cache. */
1786 cache = bfcache_new (this_frame);
1787 cache->tp = find_thread_ptid (inferior_ptid);
1788 cache->bfun = bfun;
1789
1790 *this_cache = cache;
1791 return 1;
1792 }
1793
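/* Implement dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  */
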
1794 static void
1795 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1796 {
1797 struct btrace_frame_cache *cache;
1798 void **slot;
1799
1800 cache = (struct btrace_frame_cache *) this_cache;
1801
1802 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1803 gdb_assert (slot != NULL);
1804
1805 htab_remove_elt (bfcache, cache);
1806 }
1807
1808 /* btrace recording does not store previous memory content, nor the
1809 contents of the stack frames.  Any unwinding would return erroneous
1810 results as the stack contents no longer match the changed PC value
1811 restored from history.  Therefore this unwinder reports any possibly
1812 unwound registers as <unavailable>. */
1813
1814 const struct frame_unwind record_btrace_frame_unwind =
1815 {
1816 NORMAL_FRAME,
1817 record_btrace_frame_unwind_stop_reason,
1818 record_btrace_frame_this_id,
1819 record_btrace_frame_prev_register,
1820 NULL,
1821 record_btrace_frame_sniffer,
1822 record_btrace_frame_dealloc_cache
1823 };
1824
1825 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1826 {
1827 TAILCALL_FRAME,
1828 record_btrace_frame_unwind_stop_reason,
1829 record_btrace_frame_this_id,
1830 record_btrace_frame_prev_register,
1831 NULL,
1832 record_btrace_tailcall_frame_sniffer,
1833 record_btrace_frame_dealloc_cache
1834 };
1835
1836 /* Implement the to_get_unwinder method. */
1837
1838 static const struct frame_unwind *
1839 record_btrace_to_get_unwinder (struct target_ops *self)
1840 {
1841 return &record_btrace_frame_unwind;
1842 }
1843
1844 /* Implement the to_get_tailcall_unwinder method. */
1845
1846 static const struct frame_unwind *
1847 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1848 {
1849 return &record_btrace_tailcall_frame_unwind;
1850 }
1851
1852 /* Return a human-readable string for FLAG. */
1853
1854 static const char *
1855 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1856 {
1857 switch (flag)
1858 {
1859 case BTHR_STEP:
1860 return "step";
1861
1862 case BTHR_RSTEP:
1863 return "reverse-step";
1864
1865 case BTHR_CONT:
1866 return "cont";
1867
1868 case BTHR_RCONT:
1869 return "reverse-cont";
1870
1871 case BTHR_STOP:
1872 return "stop";
1873 }
1874
1875 return "<invalid>";
1876 }
1877
1878 /* Indicate that TP should be resumed according to FLAG. */
1879
1880 static void
1881 record_btrace_resume_thread (struct thread_info *tp,
1882 enum btrace_thread_flag flag)
1883 {
1884 struct btrace_thread_info *btinfo;
1885
1886 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1887 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1888
1889 btinfo = &tp->btrace;
1890
1891 /* Fetch the latest branch trace. */
1892 btrace_fetch (tp);
1893
1894 /* A resume request overwrites a preceding resume or stop request. */
1895 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1896 btinfo->flags |= flag;
1897 }
1898
1899 /* Get the current frame for TP. */
1900
1901 static struct frame_info *
1902 get_thread_current_frame (struct thread_info *tp)
1903 {
1904 struct frame_info *frame;
1905 ptid_t old_inferior_ptid;
1906 int executing;
1907
1908 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1909 old_inferior_ptid = inferior_ptid;
1910 inferior_ptid = tp->ptid;
1911
1912 /* Clear the executing flag to allow changes to the current frame.
1913 We are not actually running, yet. We just started a reverse execution
1914 command or a record goto command.
1915 For the latter, EXECUTING is false and this has no effect.
1916 For the former, EXECUTING is true and we're in to_wait, about to
1917 move the thread. Since we need to recompute the stack, we temporarily
1918 set EXECUTING to false. */
1919 executing = is_executing (inferior_ptid);
1920 set_executing (inferior_ptid, 0);
1921
1922 frame = NULL;
1923 TRY
1924 {
1925 frame = get_current_frame ();
1926 }
1927 CATCH (except, RETURN_MASK_ALL)
1928 {
1929 /* Restore the previous execution state. */
1930 set_executing (inferior_ptid, executing);
1931
1932 /* Restore the previous inferior_ptid. */
1933 inferior_ptid = old_inferior_ptid;
1934
1935 throw_exception (except);
1936 }
1937 END_CATCH
1938
1939 /* Restore the previous execution state. */
1940 set_executing (inferior_ptid, executing);
1941
1942 /* Restore the previous inferior_ptid. */
1943 inferior_ptid = old_inferior_ptid;
1944
1945 return frame;
1946 }
1947
1948 /* Start replaying a thread. */
1949
1950 static struct btrace_insn_iterator *
1951 record_btrace_start_replaying (struct thread_info *tp)
1952 {
1953 struct btrace_insn_iterator *replay;
1954 struct btrace_thread_info *btinfo;
1955
1956 btinfo = &tp->btrace;
1957 replay = NULL;
1958
1959 /* We can't start replaying without trace. */
1960 if (btinfo->begin == NULL)
1961 return NULL;
1962
1963 /* GDB stores the current frame_id when stepping in order to detect steps
1964 into subroutines.
1965 Since frames are computed differently when we're replaying, we need to
1966 recompute those stored frames and fix them up so we can still detect
1967 subroutines after we started replaying. */
1968 TRY
1969 {
1970 struct frame_info *frame;
1971 struct frame_id frame_id;
1972 int upd_step_frame_id, upd_step_stack_frame_id;
1973
1974 /* The current frame without replaying - computed via normal unwind. */
1975 frame = get_thread_current_frame (tp);
1976 frame_id = get_frame_id (frame);
1977
1978 /* Check if we need to update any stepping-related frame id's. */
1979 upd_step_frame_id = frame_id_eq (frame_id,
1980 tp->control.step_frame_id);
1981 upd_step_stack_frame_id = frame_id_eq (frame_id,
1982 tp->control.step_stack_frame_id);
1983
1984 /* We start replaying at the end of the branch trace. This corresponds
1985 to the current instruction. */
1986 replay = XNEW (struct btrace_insn_iterator);
1987 btrace_insn_end (replay, btinfo);
1988
1989 /* Skip gaps at the end of the trace. */
1990 while (btrace_insn_get (replay) == NULL)
1991 {
1992 unsigned int steps;
1993
1994 steps = btrace_insn_prev (replay, 1);
1995 if (steps == 0)
1996 error (_("No trace."));
1997 }
1998
1999 /* We're not replaying, yet. */
2000 gdb_assert (btinfo->replay == NULL);
2001 btinfo->replay = replay;
2002
2003 /* Make sure we're not using any stale registers. */
2004 registers_changed_ptid (tp->ptid);
2005
2006 /* The current frame with replaying - computed via btrace unwind. */
2007 frame = get_thread_current_frame (tp);
2008 frame_id = get_frame_id (frame);
2009
2010 /* Replace stepping related frames where necessary. */
2011 if (upd_step_frame_id)
2012 tp->control.step_frame_id = frame_id;
2013 if (upd_step_stack_frame_id)
2014 tp->control.step_stack_frame_id = frame_id;
2015 }
2016 CATCH (except, RETURN_MASK_ALL)
2017 {
2018 xfree (btinfo->replay);
2019 btinfo->replay = NULL;
2020
2021 registers_changed_ptid (tp->ptid);
2022
2023 throw_exception (except);
2024 }
2025 END_CATCH
2026
2027 return replay;
2028 }
2029
2030 /* Stop replaying a thread. */
2031
2032 static void
2033 record_btrace_stop_replaying (struct thread_info *tp)
2034 {
2035 struct btrace_thread_info *btinfo;
2036
2037 btinfo = &tp->btrace;
2038
2039 xfree (btinfo->replay);
2040 btinfo->replay = NULL;
2041
2042 /* Make sure we're not leaving any stale registers. */
2043 registers_changed_ptid (tp->ptid);
2044 }
2045
2046 /* Stop replaying TP if it is at the end of its execution history. */
2047
2048 static void
2049 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2050 {
2051 struct btrace_insn_iterator *replay, end;
2052 struct btrace_thread_info *btinfo;
2053
2054 btinfo = &tp->btrace;
2055 replay = btinfo->replay;
2056
2057 if (replay == NULL)
2058 return;
2059
2060 btrace_insn_end (&end, btinfo);
2061
2062 if (btrace_insn_cmp (replay, &end) == 0)
2063 record_btrace_stop_replaying (tp);
2064 }
2065
2066 /* The to_resume method of target record-btrace. */
2067
2068 static void
2069 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2070 enum gdb_signal signal)
2071 {
2072 struct thread_info *tp;
2073 enum btrace_thread_flag flag, cflag;
2074
2075 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2076 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2077 step ? "step" : "cont");
2078
2079 /* Store the execution direction of the last resume.
2080
2081 If there is more than one to_resume call, we have to rely on infrun
2082 to not change the execution direction in-between. */
2083 record_btrace_resume_exec_dir = execution_direction;
2084
2085 /* As long as we're not replaying, just forward the request.
2086
2087 For non-stop targets this means that no thread is replaying. In order to
2088 make progress, we may need to explicitly move replaying threads to the end
2089 of their execution history. */
2090 if ((execution_direction != EXEC_REVERSE)
2091 && !record_btrace_is_replaying (ops, minus_one_ptid))
2092 {
2093 ops = ops->beneath;
2094 ops->to_resume (ops, ptid, step, signal);
2095 return;
2096 }
2097
2098 /* Compute the btrace thread flag for the requested move. */
2099 if (execution_direction == EXEC_REVERSE)
2100 {
2101 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2102 cflag = BTHR_RCONT;
2103 }
2104 else
2105 {
2106 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2107 cflag = BTHR_CONT;
2108 }
2109
2110 /* We just indicate the resume intent here. The actual stepping happens in
2111 record_btrace_wait below.
2112
2113 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2114 if (!target_is_non_stop_p ())
2115 {
2116 gdb_assert (ptid_match (inferior_ptid, ptid));
2117
2118 ALL_NON_EXITED_THREADS (tp)
2119 if (ptid_match (tp->ptid, ptid))
2120 {
2121 if (ptid_match (tp->ptid, inferior_ptid))
2122 record_btrace_resume_thread (tp, flag);
2123 else
2124 record_btrace_resume_thread (tp, cflag);
2125 }
2126 }
2127 else
2128 {
2129 ALL_NON_EXITED_THREADS (tp)
2130 if (ptid_match (tp->ptid, ptid))
2131 record_btrace_resume_thread (tp, flag);
2132 }
2133
2134 /* Async support. */
2135 if (target_can_async_p ())
2136 {
2137 target_async (1);
2138 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2139 }
2140 }
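
/* For illustration only (not part of the build): the flag selection in
   record_btrace_resume above maps the requested move onto btrace thread
   flags.  The first column applies to the resumed thread, the second to
   the other matching threads on all-stop targets:

     forward step      -> BTHR_STEP  / BTHR_CONT
     forward continue  -> BTHR_CONT  / BTHR_CONT
     reverse step      -> BTHR_RSTEP / BTHR_RCONT
     reverse continue  -> BTHR_RCONT / BTHR_RCONT  */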
2141
2142 /* The to_commit_resume method of target record-btrace. */
2143
2144 static void
2145 record_btrace_commit_resume (struct target_ops *ops)
2146 {
2147 if ((execution_direction != EXEC_REVERSE)
2148 && !record_btrace_is_replaying (ops, minus_one_ptid))
2149 ops->beneath->to_commit_resume (ops->beneath);
2150 }
2151
2152 /* Cancel resuming TP. */
2153
2154 static void
2155 record_btrace_cancel_resume (struct thread_info *tp)
2156 {
2157 enum btrace_thread_flag flags;
2158
2159 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2160 if (flags == 0)
2161 return;
2162
2163 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2164 print_thread_id (tp),
2165 target_pid_to_str (tp->ptid), flags,
2166 btrace_thread_flag_to_str (flags));
2167
2168 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2169 record_btrace_stop_replaying_at_end (tp);
2170 }
2171
2172 /* Return a target_waitstatus indicating that we ran out of history. */
2173
2174 static struct target_waitstatus
2175 btrace_step_no_history (void)
2176 {
2177 struct target_waitstatus status;
2178
2179 status.kind = TARGET_WAITKIND_NO_HISTORY;
2180
2181 return status;
2182 }
2183
2184 /* Return a target_waitstatus indicating that a step finished. */
2185
2186 static struct target_waitstatus
2187 btrace_step_stopped (void)
2188 {
2189 struct target_waitstatus status;
2190
2191 status.kind = TARGET_WAITKIND_STOPPED;
2192 status.value.sig = GDB_SIGNAL_TRAP;
2193
2194 return status;
2195 }
2196
2197 /* Return a target_waitstatus indicating that a thread was stopped as
2198 requested. */
2199
2200 static struct target_waitstatus
2201 btrace_step_stopped_on_request (void)
2202 {
2203 struct target_waitstatus status;
2204
2205 status.kind = TARGET_WAITKIND_STOPPED;
2206 status.value.sig = GDB_SIGNAL_0;
2207
2208 return status;
2209 }
2210
2211 /* Return a target_waitstatus indicating a spurious stop. */
2212
2213 static struct target_waitstatus
2214 btrace_step_spurious (void)
2215 {
2216 struct target_waitstatus status;
2217
2218 status.kind = TARGET_WAITKIND_SPURIOUS;
2219
2220 return status;
2221 }
2222
2223 /* Return a target_waitstatus indicating that the thread was not resumed. */
2224
2225 static struct target_waitstatus
2226 btrace_step_no_resumed (void)
2227 {
2228 struct target_waitstatus status;
2229
2230 status.kind = TARGET_WAITKIND_NO_RESUMED;
2231
2232 return status;
2233 }
2234
2235 /* Return a target_waitstatus indicating that we should wait again. */
2236
2237 static struct target_waitstatus
2238 btrace_step_again (void)
2239 {
2240 struct target_waitstatus status;
2241
2242 status.kind = TARGET_WAITKIND_IGNORE;
2243
2244 return status;
2245 }
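
/* For illustration only (not part of the build): a minimal sketch of how
   the wait-status constructors above are consumed.  Callers dispatch on
   the kind (and, where applicable, the signal) of the returned status:

     struct target_waitstatus status = btrace_step_stopped ();

     if (status.kind == TARGET_WAITKIND_STOPPED
         && status.value.sig == GDB_SIGNAL_TRAP)
       ...   -- a single-step completed
     else if (status.kind == TARGET_WAITKIND_IGNORE)
       ...   -- the wait loop should step this thread again  */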
2246
2247 /* Clear the record histories. */
2248
2249 static void
2250 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2251 {
2252 xfree (btinfo->insn_history);
2253 xfree (btinfo->call_history);
2254
2255 btinfo->insn_history = NULL;
2256 btinfo->call_history = NULL;
2257 }
2258
2259 /* Check whether TP's current replay position is at a breakpoint. */
2260
2261 static int
2262 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2263 {
2264 struct btrace_insn_iterator *replay;
2265 struct btrace_thread_info *btinfo;
2266 const struct btrace_insn *insn;
2267 struct inferior *inf;
2268
2269 btinfo = &tp->btrace;
2270 replay = btinfo->replay;
2271
2272 if (replay == NULL)
2273 return 0;
2274
2275 insn = btrace_insn_get (replay);
2276 if (insn == NULL)
2277 return 0;
2278
2279 inf = find_inferior_ptid (tp->ptid);
2280 if (inf == NULL)
2281 return 0;
2282
2283 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2284 &btinfo->stop_reason);
2285 }
2286
2287 /* Step one instruction in the forward direction. */
2288
2289 static struct target_waitstatus
2290 record_btrace_single_step_forward (struct thread_info *tp)
2291 {
2292 struct btrace_insn_iterator *replay, end, start;
2293 struct btrace_thread_info *btinfo;
2294
2295 btinfo = &tp->btrace;
2296 replay = btinfo->replay;
2297
2298 /* We're done if we're not replaying. */
2299 if (replay == NULL)
2300 return btrace_step_no_history ();
2301
2302 /* Check if we're stepping a breakpoint. */
2303 if (record_btrace_replay_at_breakpoint (tp))
2304 return btrace_step_stopped ();
2305
2306 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2307 jump back to the instruction at which we started. */
2308 start = *replay;
2309 do
2310 {
2311 unsigned int steps;
2312
2313 /* We will bail out here if we continue stepping after reaching the end
2314 of the execution history. */
2315 steps = btrace_insn_next (replay, 1);
2316 if (steps == 0)
2317 {
2318 *replay = start;
2319 return btrace_step_no_history ();
2320 }
2321 }
2322 while (btrace_insn_get (replay) == NULL);
2323
2324 /* Determine the end of the instruction trace. */
2325 btrace_insn_end (&end, btinfo);
2326
2327 /* The execution trace contains (and ends with) the current instruction.
2328 This instruction has not been executed yet, so the trace really ends
2329 one instruction earlier. */
2330 if (btrace_insn_cmp (replay, &end) == 0)
2331 return btrace_step_no_history ();
2332
2333 return btrace_step_spurious ();
2334 }
2335
2336 /* Step one instruction in the backward direction. */
2337
2338 static struct target_waitstatus
2339 record_btrace_single_step_backward (struct thread_info *tp)
2340 {
2341 struct btrace_insn_iterator *replay, start;
2342 struct btrace_thread_info *btinfo;
2343
2344 btinfo = &tp->btrace;
2345 replay = btinfo->replay;
2346
2347 /* Start replaying if we're not already doing so. */
2348 if (replay == NULL)
2349 replay = record_btrace_start_replaying (tp);
2350
2351 /* If we can't step any further, we reached the beginning of the history.
2352 Skip gaps during replay. If we end up at a gap (at the beginning of
2353 the trace), jump back to the instruction at which we started. */
2354 start = *replay;
2355 do
2356 {
2357 unsigned int steps;
2358
2359 steps = btrace_insn_prev (replay, 1);
2360 if (steps == 0)
2361 {
2362 *replay = start;
2363 return btrace_step_no_history ();
2364 }
2365 }
2366 while (btrace_insn_get (replay) == NULL);
2367
2368 /* Check if we're stepping a breakpoint.
2369
2370 For reverse-stepping, this check is after the step. There is logic in
2371 infrun.c that handles reverse-stepping separately. See, for example,
2372 proceed and adjust_pc_after_break.
2373
2374 This code assumes that for reverse-stepping, PC points to the last
2375 de-executed instruction, whereas for forward-stepping PC points to the
2376 next to-be-executed instruction. */
2377 if (record_btrace_replay_at_breakpoint (tp))
2378 return btrace_step_stopped ();
2379
2380 return btrace_step_spurious ();
2381 }
2382
2383 /* Step a single thread. */
2384
2385 static struct target_waitstatus
2386 record_btrace_step_thread (struct thread_info *tp)
2387 {
2388 struct btrace_thread_info *btinfo;
2389 struct target_waitstatus status;
2390 enum btrace_thread_flag flags;
2391
2392 btinfo = &tp->btrace;
2393
2394 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2395 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2396
2397 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2398 target_pid_to_str (tp->ptid), flags,
2399 btrace_thread_flag_to_str (flags));
2400
2401 /* We can't step without an execution history. */
2402 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2403 return btrace_step_no_history ();
2404
2405 switch (flags)
2406 {
2407 default:
2408 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2409
2410 case BTHR_STOP:
2411 return btrace_step_stopped_on_request ();
2412
2413 case BTHR_STEP:
2414 status = record_btrace_single_step_forward (tp);
2415 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2416 break;
2417
2418 return btrace_step_stopped ();
2419
2420 case BTHR_RSTEP:
2421 status = record_btrace_single_step_backward (tp);
2422 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2423 break;
2424
2425 return btrace_step_stopped ();
2426
2427 case BTHR_CONT:
2428 status = record_btrace_single_step_forward (tp);
2429 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2430 break;
2431
2432 btinfo->flags |= flags;
2433 return btrace_step_again ();
2434
2435 case BTHR_RCONT:
2436 status = record_btrace_single_step_backward (tp);
2437 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2438 break;
2439
2440 btinfo->flags |= flags;
2441 return btrace_step_again ();
2442 }
2443
2444 /* We keep threads moving at the end of their execution history. The to_wait
2445 method will stop the thread for which the event is reported. */
2446 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2447 btinfo->flags |= flags;
2448
2449 return status;
2450 }
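
/* For illustration only (not part of the build): the dispatch in
   record_btrace_step_thread above acts as a small state machine:

     BTHR_STOP            -> report a GDB_SIGNAL_0 stop immediately.
     BTHR_STEP/BTHR_RSTEP -> one step, then report a SIGTRAP stop.
     BTHR_CONT/BTHR_RCONT -> one step; on a spurious result, re-arm the
                             flag and return TARGET_WAITKIND_IGNORE so
                             that record_btrace_wait steps again.  */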
2451
2452 /* A vector of threads. */
2453
2454 typedef struct thread_info * tp_t;
2455 DEF_VEC_P (tp_t);
2456
2457 /* Announce further events if necessary. */
2458
2459 static void
2460 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2461 const VEC (tp_t) *no_history)
2462 {
2463 int more_moving, more_no_history;
2464
2465 more_moving = !VEC_empty (tp_t, moving);
2466 more_no_history = !VEC_empty (tp_t, no_history);
2467
2468 if (!more_moving && !more_no_history)
2469 return;
2470
2471 if (more_moving)
2472 DEBUG ("movers pending");
2473
2474 if (more_no_history)
2475 DEBUG ("no-history pending");
2476
2477 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2478 }
2479
2480 /* The to_wait method of target record-btrace. */
2481
2482 static ptid_t
2483 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2484 struct target_waitstatus *status, int options)
2485 {
2486 VEC (tp_t) *moving, *no_history;
2487 struct thread_info *tp, *eventing;
2488 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2489
2490 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2491
2492 /* As long as we're not replaying, just forward the request. */
2493 if ((execution_direction != EXEC_REVERSE)
2494 && !record_btrace_is_replaying (ops, minus_one_ptid))
2495 {
2496 ops = ops->beneath;
2497 return ops->to_wait (ops, ptid, status, options);
2498 }
2499
2500 moving = NULL;
2501 no_history = NULL;
2502
2503 make_cleanup (VEC_cleanup (tp_t), &moving);
2504 make_cleanup (VEC_cleanup (tp_t), &no_history);
2505
2506 /* Keep a work list of moving threads. */
2507 ALL_NON_EXITED_THREADS (tp)
2508 if (ptid_match (tp->ptid, ptid)
2509 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2510 VEC_safe_push (tp_t, moving, tp);
2511
2512 if (VEC_empty (tp_t, moving))
2513 {
2514 *status = btrace_step_no_resumed ();
2515
2516 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2517 target_waitstatus_to_string (status));
2518
2519 do_cleanups (cleanups);
2520 return null_ptid;
2521 }
2522
2523 /* Step moving threads one by one, one step each, until either one thread
2524 reports an event or we run out of threads to step.
2525
2526 When stepping more than one thread, chances are that some threads reach
2527 the end of their execution history earlier than others. If we reported
2528 this immediately, all-stop on top of non-stop would stop all threads and
2529 resume the same threads next time. And we would report the same thread
2530 having reached the end of its execution history again.
2531
2532 In the worst case, this would starve the other threads. But even if other
2533 threads would be allowed to make progress, this would result in far too
2534 many intermediate stops.
2535
2536 We therefore delay the reporting of "no execution history" until we have
2537 nothing else to report. By this time, all threads should have moved to
2538 either the beginning or the end of their execution history. There will
2539 be a single user-visible stop. */
2540 eventing = NULL;
2541 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2542 {
2543 unsigned int ix;
2544
2545 ix = 0;
2546 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2547 {
2548 *status = record_btrace_step_thread (tp);
2549
2550 switch (status->kind)
2551 {
2552 case TARGET_WAITKIND_IGNORE:
2553 ix++;
2554 break;
2555
2556 case TARGET_WAITKIND_NO_HISTORY:
2557 VEC_safe_push (tp_t, no_history,
2558 VEC_ordered_remove (tp_t, moving, ix));
2559 break;
2560
2561 default:
2562 eventing = VEC_unordered_remove (tp_t, moving, ix);
2563 break;
2564 }
2565 }
2566 }
2567
2568 if (eventing == NULL)
2569 {
2570 /* We started with at least one moving thread. This thread must have
2571 either stopped or reached the end of its execution history.
2572
2573 In the former case, EVENTING must not be NULL.
2574 In the latter case, NO_HISTORY must not be empty. */
2575 gdb_assert (!VEC_empty (tp_t, no_history));
2576
2577 /* We kept threads moving at the end of their execution history. Stop
2578 EVENTING now that we are going to report its stop. */
2579 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2580 eventing->btrace.flags &= ~BTHR_MOVE;
2581
2582 *status = btrace_step_no_history ();
2583 }
2584
2585 gdb_assert (eventing != NULL);
2586
2587 /* We kept threads replaying at the end of their execution history. Stop
2588 replaying EVENTING now that we are going to report its stop. */
2589 record_btrace_stop_replaying_at_end (eventing);
2590
2591 /* Stop all other threads. */
2592 if (!target_is_non_stop_p ())
2593 ALL_NON_EXITED_THREADS (tp)
2594 record_btrace_cancel_resume (tp);
2595
2596 /* In async mode, we need to announce further events. */
2597 if (target_is_async_p ())
2598 record_btrace_maybe_mark_async_event (moving, no_history);
2599
2600 /* Start record histories anew from the current position. */
2601 record_btrace_clear_histories (&eventing->btrace);
2602
2603 /* We moved the replay position but did not update registers. */
2604 registers_changed_ptid (eventing->ptid);
2605
2606 DEBUG ("wait ended by thread %s (%s): %s",
2607 print_thread_id (eventing),
2608 target_pid_to_str (eventing->ptid),
2609 target_waitstatus_to_string (status));
2610
2611 do_cleanups (cleanups);
2612 return eventing->ptid;
2613 }
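
/* For illustration only (not part of the build): a worked example of the
   delayed reporting above.  With two replaying threads A and B where A
   runs out of history first, A is moved to the NO_HISTORY list while B
   keeps stepping.  Only once B also stops or runs out of history is a
   single event reported, so the user sees one stop instead of one stop
   per thread.  */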
2614
2615 /* The to_stop method of target record-btrace. */
2616
2617 static void
2618 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2619 {
2620 DEBUG ("stop %s", target_pid_to_str (ptid));
2621
2622 /* As long as we're not replaying, just forward the request. */
2623 if ((execution_direction != EXEC_REVERSE)
2624 && !record_btrace_is_replaying (ops, minus_one_ptid))
2625 {
2626 ops = ops->beneath;
2627 ops->to_stop (ops, ptid);
2628 }
2629 else
2630 {
2631 struct thread_info *tp;
2632
2633 ALL_NON_EXITED_THREADS (tp)
2634 if (ptid_match (tp->ptid, ptid))
2635 {
2636 tp->btrace.flags &= ~BTHR_MOVE;
2637 tp->btrace.flags |= BTHR_STOP;
2638 }
2639 }
2640 }
2641
2642 /* The to_can_execute_reverse method of target record-btrace. */
2643
2644 static int
2645 record_btrace_can_execute_reverse (struct target_ops *self)
2646 {
2647 return 1;
2648 }
2649
2650 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2651
2652 static int
2653 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2654 {
2655 if (record_btrace_is_replaying (ops, minus_one_ptid))
2656 {
2657 struct thread_info *tp = inferior_thread ();
2658
2659 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2660 }
2661
2662 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2663 }
2664
2665 /* The to_supports_stopped_by_sw_breakpoint method of target
2666 record-btrace. */
2667
2668 static int
2669 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2670 {
2671 if (record_btrace_is_replaying (ops, minus_one_ptid))
2672 return 1;
2673
2674 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2675 }
2676
2677 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2678
2679 static int
2680 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2681 {
2682 if (record_btrace_is_replaying (ops, minus_one_ptid))
2683 {
2684 struct thread_info *tp = inferior_thread ();
2685
2686 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2687 }
2688
2689 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2690 }
2691
2692 /* The to_supports_stopped_by_hw_breakpoint method of target
2693 record-btrace. */
2694
2695 static int
2696 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2697 {
2698 if (record_btrace_is_replaying (ops, minus_one_ptid))
2699 return 1;
2700
2701 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2702 }
2703
2704 /* The to_update_thread_list method of target record-btrace. */
2705
2706 static void
2707 record_btrace_update_thread_list (struct target_ops *ops)
2708 {
2709 /* We don't add or remove threads during replay. */
2710 if (record_btrace_is_replaying (ops, minus_one_ptid))
2711 return;
2712
2713 /* Forward the request. */
2714 ops = ops->beneath;
2715 ops->to_update_thread_list (ops);
2716 }
2717
2718 /* The to_thread_alive method of target record-btrace. */
2719
2720 static int
2721 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2722 {
2723 /* We don't add or remove threads during replay. */
2724 if (record_btrace_is_replaying (ops, minus_one_ptid))
2725 return find_thread_ptid (ptid) != NULL;
2726
2727 /* Forward the request. */
2728 ops = ops->beneath;
2729 return ops->to_thread_alive (ops, ptid);
2730 }
2731
2732 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2733 is stopped. */
2734
2735 static void
2736 record_btrace_set_replay (struct thread_info *tp,
2737 const struct btrace_insn_iterator *it)
2738 {
2739 struct btrace_thread_info *btinfo;
2740
2741 btinfo = &tp->btrace;
2742
2743 if (it == NULL || it->function == NULL)
2744 record_btrace_stop_replaying (tp);
2745 else
2746 {
2747 if (btinfo->replay == NULL)
2748 record_btrace_start_replaying (tp);
2749 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2750 return;
2751
2752 *btinfo->replay = *it;
2753 registers_changed_ptid (tp->ptid);
2754 }
2755
2756 /* Start anew from the new replay position. */
2757 record_btrace_clear_histories (btinfo);
2758
2759 stop_pc = regcache_read_pc (get_current_regcache ());
2760 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2761 }
2762
2763 /* The to_goto_record_begin method of target record-btrace. */
2764
2765 static void
2766 record_btrace_goto_begin (struct target_ops *self)
2767 {
2768 struct thread_info *tp;
2769 struct btrace_insn_iterator begin;
2770
2771 tp = require_btrace_thread ();
2772
2773 btrace_insn_begin (&begin, &tp->btrace);
2774
2775 /* Skip gaps at the beginning of the trace. */
2776 while (btrace_insn_get (&begin) == NULL)
2777 {
2778 unsigned int steps;
2779
2780 steps = btrace_insn_next (&begin, 1);
2781 if (steps == 0)
2782 error (_("No trace."));
2783 }
2784
2785 record_btrace_set_replay (tp, &begin);
2786 }
2787
2788 /* The to_goto_record_end method of target record-btrace. */
2789
2790 static void
2791 record_btrace_goto_end (struct target_ops *ops)
2792 {
2793 struct thread_info *tp;
2794
2795 tp = require_btrace_thread ();
2796
2797 record_btrace_set_replay (tp, NULL);
2798 }
2799
2800 /* The to_goto_record method of target record-btrace. */
2801
2802 static void
2803 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2804 {
2805 struct thread_info *tp;
2806 struct btrace_insn_iterator it;
2807 unsigned int number;
2808 int found;
2809
2810 number = insn;
2811
2812 /* Check for wrap-arounds. */
2813 if (number != insn)
2814 error (_("Instruction number out of range."));
2815
2816 tp = require_btrace_thread ();
2817
2818 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2819
2820 /* Check if the instruction could not be found or is a gap. */
2821 if (found == 0 || btrace_insn_get (&it) == NULL)
2822 error (_("No such instruction."));
2823
2824 record_btrace_set_replay (tp, &it);
2825 }
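
/* For illustration only (not part of the build): the three methods above
   back the "record goto" CLI command:

     (gdb) record goto begin   -- to_goto_record_begin
     (gdb) record goto 42      -- to_goto_record, instruction number 42
     (gdb) record goto end     -- to_goto_record_end, stops replaying  */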
2826
2827 /* The to_record_stop_replaying method of target record-btrace. */
2828
2829 static void
2830 record_btrace_stop_replaying_all (struct target_ops *self)
2831 {
2832 struct thread_info *tp;
2833
2834 ALL_NON_EXITED_THREADS (tp)
2835 record_btrace_stop_replaying (tp);
2836 }
2837
2838 /* The to_execution_direction target method. */
2839
2840 static enum exec_direction_kind
2841 record_btrace_execution_direction (struct target_ops *self)
2842 {
2843 return record_btrace_resume_exec_dir;
2844 }
2845
2846 /* The to_prepare_to_generate_core target method. */
2847
2848 static void
2849 record_btrace_prepare_to_generate_core (struct target_ops *self)
2850 {
2851 record_btrace_generating_corefile = 1;
2852 }
2853
2854 /* The to_done_generating_core target method. */
2855
2856 static void
2857 record_btrace_done_generating_core (struct target_ops *self)
2858 {
2859 record_btrace_generating_corefile = 0;
2860 }
2861
2862 /* Initialize the record-btrace target ops. */
2863
2864 static void
2865 init_record_btrace_ops (void)
2866 {
2867 struct target_ops *ops;
2868
2869 ops = &record_btrace_ops;
2870 ops->to_shortname = "record-btrace";
2871 ops->to_longname = "Branch tracing target";
2872 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2873 ops->to_open = record_btrace_open;
2874 ops->to_close = record_btrace_close;
2875 ops->to_async = record_btrace_async;
2876 ops->to_detach = record_detach;
2877 ops->to_disconnect = record_btrace_disconnect;
2878 ops->to_mourn_inferior = record_mourn_inferior;
2879 ops->to_kill = record_kill;
2880 ops->to_stop_recording = record_btrace_stop_recording;
2881 ops->to_info_record = record_btrace_info;
2882 ops->to_insn_history = record_btrace_insn_history;
2883 ops->to_insn_history_from = record_btrace_insn_history_from;
2884 ops->to_insn_history_range = record_btrace_insn_history_range;
2885 ops->to_call_history = record_btrace_call_history;
2886 ops->to_call_history_from = record_btrace_call_history_from;
2887 ops->to_call_history_range = record_btrace_call_history_range;
2888 ops->to_record_is_replaying = record_btrace_is_replaying;
2889 ops->to_record_will_replay = record_btrace_will_replay;
2890 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2891 ops->to_xfer_partial = record_btrace_xfer_partial;
2892 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2893 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2894 ops->to_fetch_registers = record_btrace_fetch_registers;
2895 ops->to_store_registers = record_btrace_store_registers;
2896 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2897 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2898 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2899 ops->to_resume = record_btrace_resume;
2900 ops->to_commit_resume = record_btrace_commit_resume;
2901 ops->to_wait = record_btrace_wait;
2902 ops->to_stop = record_btrace_stop;
2903 ops->to_update_thread_list = record_btrace_update_thread_list;
2904 ops->to_thread_alive = record_btrace_thread_alive;
2905 ops->to_goto_record_begin = record_btrace_goto_begin;
2906 ops->to_goto_record_end = record_btrace_goto_end;
2907 ops->to_goto_record = record_btrace_goto;
2908 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2909 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2910 ops->to_supports_stopped_by_sw_breakpoint
2911 = record_btrace_supports_stopped_by_sw_breakpoint;
2912 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2913 ops->to_supports_stopped_by_hw_breakpoint
2914 = record_btrace_supports_stopped_by_hw_breakpoint;
2915 ops->to_execution_direction = record_btrace_execution_direction;
2916 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2917 ops->to_done_generating_core = record_btrace_done_generating_core;
2918 ops->to_stratum = record_stratum;
2919 ops->to_magic = OPS_MAGIC;
2920 }
2921
2922 /* Start recording in BTS format. */
2923
2924 static void
2925 cmd_record_btrace_bts_start (char *args, int from_tty)
2926 {
2927 if (args != NULL && *args != 0)
2928 error (_("Invalid argument."));
2929
2930 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2931
2932 TRY
2933 {
2934 execute_command ("target record-btrace", from_tty);
2935 }
2936 CATCH (exception, RETURN_MASK_ALL)
2937 {
2938 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2939 throw_exception (exception);
2940 }
2941 END_CATCH
2942 }
2943
2944 /* Start recording in Intel Processor Trace format. */
2945
2946 static void
2947 cmd_record_btrace_pt_start (char *args, int from_tty)
2948 {
2949 if (args != NULL && *args != 0)
2950 error (_("Invalid argument."));
2951
2952 record_btrace_conf.format = BTRACE_FORMAT_PT;
2953
2954 TRY
2955 {
2956 execute_command ("target record-btrace", from_tty);
2957 }
2958 CATCH (exception, RETURN_MASK_ALL)
2959 {
2960 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2961 throw_exception (exception);
2962 }
2963 END_CATCH
2964 }
2965
2966 /* The "record btrace" command: start recording, trying Intel PT first and falling back to BTS. */
2967
2968 static void
2969 cmd_record_btrace_start (char *args, int from_tty)
2970 {
2971 if (args != NULL && *args != 0)
2972 error (_("Invalid argument."));
2973
2974 record_btrace_conf.format = BTRACE_FORMAT_PT;
2975
2976 TRY
2977 {
2978 execute_command ("target record-btrace", from_tty);
2979 }
2980 CATCH (exception, RETURN_MASK_ALL)
2981 {
2982 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2983
2984 TRY
2985 {
2986 execute_command ("target record-btrace", from_tty);
2987 }
2988 CATCH (exception, RETURN_MASK_ALL)
2989 {
2990 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2991 throw_exception (exception);
2992 }
2993 END_CATCH
2994 }
2995 END_CATCH
2996 }
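
/* For illustration only (not part of the build): the resulting command
   behavior.  Plain "record btrace" prefers Intel Processor Trace and
   silently falls back to BTS; the explicit subcommands do not fall back:

     (gdb) record btrace       -- try pt, then bts
     (gdb) record btrace pt    -- pt or error
     (gdb) record btrace bts   -- bts or error  */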
2997
2998 /* The "set record btrace" command. */
2999
3000 static void
3001 cmd_set_record_btrace (char *args, int from_tty)
3002 {
3003 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3004 }
3005
3006 /* The "show record btrace" command. */
3007
3008 static void
3009 cmd_show_record_btrace (char *args, int from_tty)
3010 {
3011 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3012 }
3013
3014 /* The "show record btrace replay-memory-access" command. */
3015
3016 static void
3017 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3018 struct cmd_list_element *c, const char *value)
3019 {
3020 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3021 replay_memory_access);
3022 }
3023
3024 /* The "set record btrace bts" command. */
3025
3026 static void
3027 cmd_set_record_btrace_bts (char *args, int from_tty)
3028 {
3029 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3030 "by an appropriate subcommand.\n"));
3031 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3032 all_commands, gdb_stdout);
3033 }
3034
3035 /* The "show record btrace bts" command. */
3036
3037 static void
3038 cmd_show_record_btrace_bts (char *args, int from_tty)
3039 {
3040 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3041 }
3042
3043 /* The "set record btrace pt" command. */
3044
3045 static void
3046 cmd_set_record_btrace_pt (char *args, int from_tty)
3047 {
3048 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3049 "by an appropriate subcommand.\n"));
3050 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3051 all_commands, gdb_stdout);
3052 }
3053
3054 /* The "show record btrace pt" command. */
3055
3056 static void
3057 cmd_show_record_btrace_pt (char *args, int from_tty)
3058 {
3059 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3060 }
3061
3062 /* The "record bts buffer-size" show value function. */
3063
3064 static void
3065 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3066 struct cmd_list_element *c,
3067 const char *value)
3068 {
3069 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3070 value);
3071 }
3072
3073 /* The "record pt buffer-size" show value function. */
3074
3075 static void
3076 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3077 struct cmd_list_element *c,
3078 const char *value)
3079 {
3080 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3081 value);
3082 }
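
/* For illustration only (not part of the build): the buffer sizes shown
   above are configured with the set/show commands registered below; the
   values are byte counts:

     (gdb) set record btrace bts buffer-size 65536
     (gdb) set record btrace pt buffer-size 32768
     (gdb) show record btrace pt buffer-size  */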
3083
3084 void _initialize_record_btrace (void);
3085
3086 /* Initialize btrace commands. */
3087
3088 void
3089 _initialize_record_btrace (void)
3090 {
3091 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3092 _("Start branch trace recording."), &record_btrace_cmdlist,
3093 "record btrace ", 0, &record_cmdlist);
3094 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3095
3096 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3097 _("\
3098 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3099 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3100 This format may not be available on all processors."),
3101 &record_btrace_cmdlist);
3102 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3103
3104 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3105 _("\
3106 Start branch trace recording in Intel Processor Trace format.\n\n\
3107 This format may not be available on all processors."),
3108 &record_btrace_cmdlist);
3109 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3110
3111 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3112 _("Set record options"), &set_record_btrace_cmdlist,
3113 "set record btrace ", 0, &set_record_cmdlist);
3114
3115 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3116 _("Show record options"), &show_record_btrace_cmdlist,
3117 "show record btrace ", 0, &show_record_cmdlist);
3118
3119 add_setshow_enum_cmd ("replay-memory-access", no_class,
3120 replay_memory_access_types, &replay_memory_access, _("\
3121 Set what memory accesses are allowed during replay."), _("\
3122 Show what memory accesses are allowed during replay."),
3123 _("Default is READ-ONLY.\n\n\
3124 The btrace record target does not trace data.\n\
3125 The memory therefore corresponds to the live target and not \
3126 to the current replay position.\n\n\
3127 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3128 When READ-WRITE, allow accesses to read-only and read-write memory during \
3129 replay."),
3130 NULL, cmd_show_replay_memory_access,
3131 &set_record_btrace_cmdlist,
3132 &show_record_btrace_cmdlist);
3133
3134 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3135 _("Set record btrace bts options"),
3136 &set_record_btrace_bts_cmdlist,
3137 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3138
3139 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3140 _("Show record btrace bts options"),
3141 &show_record_btrace_bts_cmdlist,
3142 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3143
3144 add_setshow_uinteger_cmd ("buffer-size", no_class,
3145 &record_btrace_conf.bts.size,
3146 _("Set the record/replay bts buffer size."),
3147 _("Show the record/replay bts buffer size."), _("\
3148 When starting recording, request a trace buffer of this size. \
3149 The actual buffer size may differ from the requested size. \
3150 Use \"info record\" to see the actual buffer size.\n\n\
3151 Bigger buffers allow longer recording but also take more time to process \
3152 the recorded execution trace.\n\n\
3153 The trace buffer size may not be changed while recording."), NULL,
3154 show_record_bts_buffer_size_value,
3155 &set_record_btrace_bts_cmdlist,
3156 &show_record_btrace_bts_cmdlist);
3157
3158 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3159 _("Set record btrace pt options"),
3160 &set_record_btrace_pt_cmdlist,
3161 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3162
3163 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3164 _("Show record btrace pt options"),
3165 &show_record_btrace_pt_cmdlist,
3166 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3167
3168 add_setshow_uinteger_cmd ("buffer-size", no_class,
3169 &record_btrace_conf.pt.size,
3170 _("Set the record/replay pt buffer size."),
3171 _("Show the record/replay pt buffer size."), _("\
3172 Bigger buffers allow longer recording but also take more time to process \
3173 the recorded execution.\n\
3174 The actual buffer size may differ from the requested size. Use \"info record\" \
3175 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3176 &set_record_btrace_pt_cmdlist,
3177 &show_record_btrace_pt_cmdlist);
3178
3179 init_record_btrace_ops ();
3180 add_target (&record_btrace_ops);
3181
3182 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3183 xcalloc, xfree);
3184
3185 record_btrace_conf.bts.size = 64 * 1024;
3186 record_btrace_conf.pt.size = 16 * 1024;
3187 }