gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

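/* For example, DEBUG ("resume %s", target_pid_to_str (ptid)) prints
   "[record-btrace] resume <ptid>" to gdb_stdlog when record debugging is
   enabled (e.g. via "set debug record 1").  */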

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
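
/* Typical usage (see record_btrace_open below): each thread is added right
   after branch tracing was successfully enabled for it; once all threads
   are enabled, discard () turns the destructor into a no-op.  */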

/* The to_open method of target record-btrace.  */
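/* ARGS, if non-empty, is interpreted as a thread number list; branch
   tracing is then enabled only for the listed threads.  */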

static void
record_btrace_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */
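/* For example, a size of 4194304 (4 MiB) becomes 4 with suffix "MB";
   sizes that are not an exact multiple of 1 kiB are returned unchanged
   with an empty suffix.  */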

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */
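/* The output has the form "[decode error (<errcode>): <message>]"; for
   BTRACE_FORMAT_PT notifications (ERRCODE > 0) the numeric code is
   omitted.  */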

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */
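/* For example, adding line 7 to an empty range yields [7; 8); adding
   line 5 to [7; 8) extends it to [5; 8).  */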

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
                            gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
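      /* For example, with SIZE == -10 and the replay position at
         instruction 25, this requests [16; 26), i.e. the ten instructions
         up to and including the current one (assuming the trace reaches
         back that far).  */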
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */
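/* For a negative SIZE, the range ends at FROM; e.g. FROM == 100 with
   SIZE == -10 requests instructions [91; 100].  */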

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */
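/* The output has the form "<begin>,<end>" with both bounds inclusive,
   e.g. "16,23" for a segment covering instructions 16 to 23.  */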

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size,
                            record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
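  /* While replaying, writes are rejected and reads are restricted to
     readonly sections, unless the user changed "set record btrace
     replay-memory-access" to "read-write".  */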
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frame contents.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
         execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
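  /* For example, a "reverse-stepi" yields BTHR_RSTEP for the stepping
     thread, while the other resumed threads get BTHR_RCONT.  */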
2064 if (execution_direction == EXEC_REVERSE)
2065 {
2066 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2067 cflag = BTHR_RCONT;
2068 }
2069 else
2070 {
2071 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2072 cflag = BTHR_CONT;
2073 }
2074
2075 /* We just indicate the resume intent here. The actual stepping happens in
2076 record_btrace_wait below.
2077
2078 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2079 if (!target_is_non_stop_p ())
2080 {
2081 gdb_assert (ptid_match (inferior_ptid, ptid));
2082
2083 ALL_NON_EXITED_THREADS (tp)
2084 if (ptid_match (tp->ptid, ptid))
2085 {
2086 if (ptid_match (tp->ptid, inferior_ptid))
2087 record_btrace_resume_thread (tp, flag);
2088 else
2089 record_btrace_resume_thread (tp, cflag);
2090 }
2091 }
2092 else
2093 {
2094 ALL_NON_EXITED_THREADS (tp)
2095 if (ptid_match (tp->ptid, ptid))
2096 record_btrace_resume_thread (tp, flag);
2097 }
2098
2099 /* Async support. */
2100 if (target_can_async_p ())
2101 {
2102 target_async (1);
2103 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2104 }
2105 }
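
/* Illustrative sketch (assuming a recorded session): an ordinary reverse
   step drives the code above.

     (gdb) record btrace
     (gdb) stepi 3
     (gdb) reverse-stepi

   The reverse-stepi runs with execution_direction == EXEC_REVERSE, so the
   request is not forwarded to the target beneath; the thread is tagged
   BTHR_RSTEP and the actual move happens in record_btrace_wait below.  */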
2106
2107 /* The to_commit_resume method of target record-btrace. */
2108
2109 static void
2110 record_btrace_commit_resume (struct target_ops *ops)
2111 {
2112 if ((execution_direction != EXEC_REVERSE)
2113 && !record_btrace_is_replaying (ops, minus_one_ptid))
2114 ops->beneath->to_commit_resume (ops->beneath);
2115 }
2116
2117 /* Cancel resuming TP. */
2118
2119 static void
2120 record_btrace_cancel_resume (struct thread_info *tp)
2121 {
2122 enum btrace_thread_flag flags;
2123
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2125 if (flags == 0)
2126 return;
2127
2128 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2129 print_thread_id (tp),
2130 target_pid_to_str (tp->ptid), flags,
2131 btrace_thread_flag_to_str (flags));
2132
2133 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2134 record_btrace_stop_replaying_at_end (tp);
2135 }
2136
2137 /* Return a target_waitstatus indicating that we ran out of history. */
2138
2139 static struct target_waitstatus
2140 btrace_step_no_history (void)
2141 {
2142 struct target_waitstatus status;
2143
2144 status.kind = TARGET_WAITKIND_NO_HISTORY;
2145
2146 return status;
2147 }
2148
2149 /* Return a target_waitstatus indicating that a step finished. */
2150
2151 static struct target_waitstatus
2152 btrace_step_stopped (void)
2153 {
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_TRAP;
2158
2159 return status;
2160 }
2161
2162 /* Return a target_waitstatus indicating that a thread was stopped as
2163 requested. */
2164
2165 static struct target_waitstatus
2166 btrace_step_stopped_on_request (void)
2167 {
2168 struct target_waitstatus status;
2169
2170 status.kind = TARGET_WAITKIND_STOPPED;
2171 status.value.sig = GDB_SIGNAL_0;
2172
2173 return status;
2174 }
2175
2176 /* Return a target_waitstatus indicating a spurious stop. */
2177
2178 static struct target_waitstatus
2179 btrace_step_spurious (void)
2180 {
2181 struct target_waitstatus status;
2182
2183 status.kind = TARGET_WAITKIND_SPURIOUS;
2184
2185 return status;
2186 }
2187
2188 /* Return a target_waitstatus indicating that the thread was not resumed. */
2189
2190 static struct target_waitstatus
2191 btrace_step_no_resumed (void)
2192 {
2193 struct target_waitstatus status;
2194
2195 status.kind = TARGET_WAITKIND_NO_RESUMED;
2196
2197 return status;
2198 }
2199
2200 /* Return a target_waitstatus indicating that we should wait again. */
2201
2202 static struct target_waitstatus
2203 btrace_step_again (void)
2204 {
2205 struct target_waitstatus status;
2206
2207 status.kind = TARGET_WAITKIND_IGNORE;
2208
2209 return status;
2210 }
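
/* Taken together, the helpers above map step outcomes to waitstatus kinds:

     btrace_step_no_history         - TARGET_WAITKIND_NO_HISTORY
     btrace_step_stopped            - TARGET_WAITKIND_STOPPED (GDB_SIGNAL_TRAP)
     btrace_step_stopped_on_request - TARGET_WAITKIND_STOPPED (GDB_SIGNAL_0)
     btrace_step_spurious           - TARGET_WAITKIND_SPURIOUS
     btrace_step_no_resumed         - TARGET_WAITKIND_NO_RESUMED
     btrace_step_again              - TARGET_WAITKIND_IGNORE (keep stepping)  */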
2211
2212 /* Clear the record histories. */
2213
2214 static void
2215 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2216 {
2217 xfree (btinfo->insn_history);
2218 xfree (btinfo->call_history);
2219
2220 btinfo->insn_history = NULL;
2221 btinfo->call_history = NULL;
2222 }
2223
2224 /* Check whether TP's current replay position is at a breakpoint. */
2225
2226 static int
2227 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2228 {
2229 struct btrace_insn_iterator *replay;
2230 struct btrace_thread_info *btinfo;
2231 const struct btrace_insn *insn;
2232 struct inferior *inf;
2233
2234 btinfo = &tp->btrace;
2235 replay = btinfo->replay;
2236
2237 if (replay == NULL)
2238 return 0;
2239
2240 insn = btrace_insn_get (replay);
2241 if (insn == NULL)
2242 return 0;
2243
2244 inf = find_inferior_ptid (tp->ptid);
2245 if (inf == NULL)
2246 return 0;
2247
2248 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2249 &btinfo->stop_reason);
2250 }
2251
2252 /* Step one instruction in forward direction. */
2253
2254 static struct target_waitstatus
2255 record_btrace_single_step_forward (struct thread_info *tp)
2256 {
2257 struct btrace_insn_iterator *replay, end, start;
2258 struct btrace_thread_info *btinfo;
2259
2260 btinfo = &tp->btrace;
2261 replay = btinfo->replay;
2262
2263 /* We're done if we're not replaying. */
2264 if (replay == NULL)
2265 return btrace_step_no_history ();
2266
2267 /* Check if we're stepping a breakpoint. */
2268 if (record_btrace_replay_at_breakpoint (tp))
2269 return btrace_step_stopped ();
2270
2271 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2272 jump back to the instruction at which we started. */
2273 start = *replay;
2274 do
2275 {
2276 unsigned int steps;
2277
2278 /* We will bail out here if we continue stepping after reaching the end
2279 of the execution history. */
2280 steps = btrace_insn_next (replay, 1);
2281 if (steps == 0)
2282 {
2283 *replay = start;
2284 return btrace_step_no_history ();
2285 }
2286 }
2287 while (btrace_insn_get (replay) == NULL);
2288
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2291
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed yet, so the trace really ends
2294 one instruction earlier. */
2295 if (btrace_insn_cmp (replay, &end) == 0)
2296 return btrace_step_no_history ();
2297
2298 return btrace_step_spurious ();
2299 }
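
/* Note: a "gap" marks a part of the trace that could not be decoded;
   btrace_insn_get returns NULL for it. Both single-step directions skip
   gaps so that replay never stops on a non-instruction.  */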
2300
2301 /* Step one instruction in backward direction. */
2302
2303 static struct target_waitstatus
2304 record_btrace_single_step_backward (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay, start;
2307 struct btrace_thread_info *btinfo;
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 /* Start replaying if we're not already doing so. */
2313 if (replay == NULL)
2314 replay = record_btrace_start_replaying (tp);
2315
2316 /* If we can't step any further, we ran out of execution history.
2317 Skip gaps during replay. If we end up at a gap (at the beginning of
2318 the trace), jump back to the instruction at which we started. */
2319 start = *replay;
2320 do
2321 {
2322 unsigned int steps;
2323
2324 steps = btrace_insn_prev (replay, 1);
2325 if (steps == 0)
2326 {
2327 *replay = start;
2328 return btrace_step_no_history ();
2329 }
2330 }
2331 while (btrace_insn_get (replay) == NULL);
2332
2333 /* Check if we're stepping a breakpoint.
2334
2335 For reverse-stepping, this check is after the step. There is logic in
2336 infrun.c that handles reverse-stepping separately. See, for example,
2337 proceed and adjust_pc_after_break.
2338
2339 This code assumes that for reverse-stepping, PC points to the last
2340 de-executed instruction, whereas for forward-stepping PC points to the
2341 next to-be-executed instruction. */
2342 if (record_btrace_replay_at_breakpoint (tp))
2343 return btrace_step_stopped ();
2344
2345 return btrace_step_spurious ();
2346 }
2347
2348 /* Step a single thread. */
2349
2350 static struct target_waitstatus
2351 record_btrace_step_thread (struct thread_info *tp)
2352 {
2353 struct btrace_thread_info *btinfo;
2354 struct target_waitstatus status;
2355 enum btrace_thread_flag flags;
2356
2357 btinfo = &tp->btrace;
2358
2359 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2360 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2361
2362 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2363 target_pid_to_str (tp->ptid), flags,
2364 btrace_thread_flag_to_str (flags));
2365
2366 /* We can't step without an execution history. */
2367 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2368 return btrace_step_no_history ();
2369
2370 switch (flags)
2371 {
2372 default:
2373 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2374
2375 case BTHR_STOP:
2376 return btrace_step_stopped_on_request ();
2377
2378 case BTHR_STEP:
2379 status = record_btrace_single_step_forward (tp);
2380 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2381 break;
2382
2383 return btrace_step_stopped ();
2384
2385 case BTHR_RSTEP:
2386 status = record_btrace_single_step_backward (tp);
2387 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2388 break;
2389
2390 return btrace_step_stopped ();
2391
2392 case BTHR_CONT:
2393 status = record_btrace_single_step_forward (tp);
2394 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2395 break;
2396
2397 btinfo->flags |= flags;
2398 return btrace_step_again ();
2399
2400 case BTHR_RCONT:
2401 status = record_btrace_single_step_backward (tp);
2402 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2403 break;
2404
2405 btinfo->flags |= flags;
2406 return btrace_step_again ();
2407 }
2408
2409 /* We keep threads moving at the end of their execution history. The to_wait
2410 method will stop the thread for which the event is reported. */
2411 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2412 btinfo->flags |= flags;
2413
2414 return status;
2415 }
2416
2417 /* A vector of threads. */
2418
2419 typedef struct thread_info * tp_t;
2420 DEF_VEC_P (tp_t);
2421
2422 /* Announce further events if necessary. */
2423
2424 static void
2425 record_btrace_maybe_mark_async_event
2426 (const std::vector<thread_info *> &moving,
2427 const std::vector<thread_info *> &no_history)
2428 {
2429 bool more_moving = !moving.empty ();
2430 bool more_no_history = !no_history.empty ();
2431
2432 if (!more_moving && !more_no_history)
2433 return;
2434
2435 if (more_moving)
2436 DEBUG ("movers pending");
2437
2438 if (more_no_history)
2439 DEBUG ("no-history pending");
2440
2441 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2442 }
2443
2444 /* The to_wait method of target record-btrace. */
2445
2446 static ptid_t
2447 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2448 struct target_waitstatus *status, int options)
2449 {
2450 std::vector<thread_info *> moving;
2451 std::vector<thread_info *> no_history;
2452
2453 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2454
2455 /* As long as we're not replaying, just forward the request. */
2456 if ((execution_direction != EXEC_REVERSE)
2457 && !record_btrace_is_replaying (ops, minus_one_ptid))
2458 {
2459 ops = ops->beneath;
2460 return ops->to_wait (ops, ptid, status, options);
2461 }
2462
2463 /* Keep a work list of moving threads. */
2464 {
2465 thread_info *tp;
2466
2467 ALL_NON_EXITED_THREADS (tp)
2468 {
2469 if (ptid_match (tp->ptid, ptid)
2470 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2471 moving.push_back (tp);
2472 }
2473 }
2474
2475 if (moving.empty ())
2476 {
2477 *status = btrace_step_no_resumed ();
2478
2479 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2480 target_waitstatus_to_string (status).c_str ());
2481
2482 return null_ptid;
2483 }
2484
2485 /* Step moving threads one by one, one step each, until either one thread
2486 reports an event or we run out of threads to step.
2487
2488 When stepping more than one thread, chances are that some threads reach
2489 the end of their execution history earlier than others. If we reported
2490 this immediately, all-stop on top of non-stop would stop all threads and
2491 resume the same threads next time. And we would report the same thread
2492 having reached the end of its execution history again.
2493
2494 In the worst case, this would starve the other threads. But even if other
2495 threads were allowed to make progress, this would result in far too
2496 many intermediate stops.
2497
2498 We therefore delay the reporting of "no execution history" until we have
2499 nothing else to report. By this time, all threads should have moved to
2500 either the beginning or the end of their execution history. There will
2501 be a single user-visible stop. */
2502 struct thread_info *eventing = NULL;
2503 while ((eventing == NULL) && !moving.empty ())
2504 {
2505 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2506 {
2507 thread_info *tp = moving[ix];
2508
2509 *status = record_btrace_step_thread (tp);
2510
2511 switch (status->kind)
2512 {
2513 case TARGET_WAITKIND_IGNORE:
2514 ix++;
2515 break;
2516
2517 case TARGET_WAITKIND_NO_HISTORY:
2518 no_history.push_back (ordered_remove (moving, ix));
2519 break;
2520
2521 default:
2522 eventing = unordered_remove (moving, ix);
2523 break;
2524 }
2525 }
2526 }
2527
2528 if (eventing == NULL)
2529 {
2530 /* We started with at least one moving thread. Each such thread must have
2531 either stopped or reached the end of its execution history.
2532
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!no_history.empty ());
2536
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = unordered_remove (no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2541
2542 *status = btrace_step_no_history ();
2543 }
2544
2545 gdb_assert (eventing != NULL);
2546
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2550
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2553 {
2554 thread_info *tp;
2555
2556 ALL_NON_EXITED_THREADS (tp)
2557 record_btrace_cancel_resume (tp);
2558 }
2559
2560 /* In async mode, we need to announce further events. */
2561 if (target_is_async_p ())
2562 record_btrace_maybe_mark_async_event (moving, no_history);
2563
2564 /* Start record histories anew from the current position. */
2565 record_btrace_clear_histories (&eventing->btrace);
2566
2567 /* We moved the replay position but did not update registers. */
2568 registers_changed_ptid (eventing->ptid);
2569
2570 DEBUG ("wait ended by thread %s (%s): %s",
2571 print_thread_id (eventing),
2572 target_pid_to_str (eventing->ptid),
2573 target_waitstatus_to_string (status).c_str ());
2574
2575 return eventing->ptid;
2576 }
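
/* Illustrative flow (a sketch for the all-stop case): infrun resumes via
   record_btrace_resume, which only tags threads with BTHR_* flags, and
   then calls record_btrace_wait. The loop above steps every tagged
   thread one instruction per pass; btrace_step_again keeps a continuing
   thread in the work list via TARGET_WAITKIND_IGNORE until it hits a
   breakpoint (reported immediately) or runs out of history (reported
   last, once nothing else is left to report).  */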
2577
2578 /* The to_stop method of target record-btrace. */
2579
2580 static void
2581 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2582 {
2583 DEBUG ("stop %s", target_pid_to_str (ptid));
2584
2585 /* As long as we're not replaying, just forward the request. */
2586 if ((execution_direction != EXEC_REVERSE)
2587 && !record_btrace_is_replaying (ops, minus_one_ptid))
2588 {
2589 ops = ops->beneath;
2590 ops->to_stop (ops, ptid);
2591 }
2592 else
2593 {
2594 struct thread_info *tp;
2595
2596 ALL_NON_EXITED_THREADS (tp)
2597 if (ptid_match (tp->ptid, ptid))
2598 {
2599 tp->btrace.flags &= ~BTHR_MOVE;
2600 tp->btrace.flags |= BTHR_STOP;
2601 }
2602 }
2603 }
2604
2605 /* The to_can_execute_reverse method of target record-btrace. */
2606
2607 static int
2608 record_btrace_can_execute_reverse (struct target_ops *self)
2609 {
2610 return 1;
2611 }
2612
2613 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2614
2615 static int
2616 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2617 {
2618 if (record_btrace_is_replaying (ops, minus_one_ptid))
2619 {
2620 struct thread_info *tp = inferior_thread ();
2621
2622 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2623 }
2624
2625 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2626 }
2627
2628 /* The to_supports_stopped_by_sw_breakpoint method of target
2629 record-btrace. */
2630
2631 static int
2632 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2633 {
2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
2635 return 1;
2636
2637 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2638 }
2639
2640 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2641
2642 static int
2643 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2644 {
2645 if (record_btrace_is_replaying (ops, minus_one_ptid))
2646 {
2647 struct thread_info *tp = inferior_thread ();
2648
2649 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2650 }
2651
2652 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2653 }
2654
2655 /* The to_supports_stopped_by_hw_breakpoint method of target
2656 record-btrace. */
2657
2658 static int
2659 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2660 {
2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
2662 return 1;
2663
2664 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2665 }
2666
2667 /* The to_update_thread_list method of target record-btrace. */
2668
2669 static void
2670 record_btrace_update_thread_list (struct target_ops *ops)
2671 {
2672 /* We don't add or remove threads during replay. */
2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
2674 return;
2675
2676 /* Forward the request. */
2677 ops = ops->beneath;
2678 ops->to_update_thread_list (ops);
2679 }
2680
2681 /* The to_thread_alive method of target record-btrace. */
2682
2683 static int
2684 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2685 {
2686 /* We don't add or remove threads during replay. */
2687 if (record_btrace_is_replaying (ops, minus_one_ptid))
2688 return find_thread_ptid (ptid) != NULL;
2689
2690 /* Forward the request. */
2691 ops = ops->beneath;
2692 return ops->to_thread_alive (ops, ptid);
2693 }
2694
2695 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2696 is stopped. */
2697
2698 static void
2699 record_btrace_set_replay (struct thread_info *tp,
2700 const struct btrace_insn_iterator *it)
2701 {
2702 struct btrace_thread_info *btinfo;
2703
2704 btinfo = &tp->btrace;
2705
2706 if (it == NULL)
2707 record_btrace_stop_replaying (tp);
2708 else
2709 {
2710 if (btinfo->replay == NULL)
2711 record_btrace_start_replaying (tp);
2712 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2713 return;
2714
2715 *btinfo->replay = *it;
2716 registers_changed_ptid (tp->ptid);
2717 }
2718
2719 /* Start anew from the new replay position. */
2720 record_btrace_clear_histories (btinfo);
2721
2722 stop_pc = regcache_read_pc (get_current_regcache ());
2723 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2724 }
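
/* Illustrative usage: the three goto methods below all funnel into
   record_btrace_set_replay. For example, in a recorded session:

     (gdb) record goto begin   # replay from the first traced instruction
     (gdb) record goto 42      # replay from instruction number 42
     (gdb) record goto end     # stop replaying; return to the live position  */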
2725
2726 /* The to_goto_record_begin method of target record-btrace. */
2727
2728 static void
2729 record_btrace_goto_begin (struct target_ops *self)
2730 {
2731 struct thread_info *tp;
2732 struct btrace_insn_iterator begin;
2733
2734 tp = require_btrace_thread ();
2735
2736 btrace_insn_begin (&begin, &tp->btrace);
2737
2738 /* Skip gaps at the beginning of the trace. */
2739 while (btrace_insn_get (&begin) == NULL)
2740 {
2741 unsigned int steps;
2742
2743 steps = btrace_insn_next (&begin, 1);
2744 if (steps == 0)
2745 error (_("No trace."));
2746 }
2747
2748 record_btrace_set_replay (tp, &begin);
2749 }
2750
2751 /* The to_goto_record_end method of target record-btrace. */
2752
2753 static void
2754 record_btrace_goto_end (struct target_ops *ops)
2755 {
2756 struct thread_info *tp;
2757
2758 tp = require_btrace_thread ();
2759
2760 record_btrace_set_replay (tp, NULL);
2761 }
2762
2763 /* The to_goto_record method of target record-btrace. */
2764
2765 static void
2766 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2767 {
2768 struct thread_info *tp;
2769 struct btrace_insn_iterator it;
2770 unsigned int number;
2771 int found;
2772
2773 number = insn;
2774
2775 /* Check for wrap-arounds. */
2776 if (number != insn)
2777 error (_("Instruction number out of range."));
2778
2779 tp = require_btrace_thread ();
2780
2781 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2782
2783 /* Check if the instruction could not be found or is a gap. */
2784 if (found == 0 || btrace_insn_get (&it) == NULL)
2785 error (_("No such instruction."));
2786
2787 record_btrace_set_replay (tp, &it);
2788 }
2789
2790 /* The to_record_stop_replaying method of target record-btrace. */
2791
2792 static void
2793 record_btrace_stop_replaying_all (struct target_ops *self)
2794 {
2795 struct thread_info *tp;
2796
2797 ALL_NON_EXITED_THREADS (tp)
2798 record_btrace_stop_replaying (tp);
2799 }
2800
2801 /* The to_execution_direction target method. */
2802
2803 static enum exec_direction_kind
2804 record_btrace_execution_direction (struct target_ops *self)
2805 {
2806 return record_btrace_resume_exec_dir;
2807 }
2808
2809 /* The to_prepare_to_generate_core target method. */
2810
2811 static void
2812 record_btrace_prepare_to_generate_core (struct target_ops *self)
2813 {
2814 record_btrace_generating_corefile = 1;
2815 }
2816
2817 /* The to_done_generating_core target method. */
2818
2819 static void
2820 record_btrace_done_generating_core (struct target_ops *self)
2821 {
2822 record_btrace_generating_corefile = 0;
2823 }
2824
2825 /* Initialize the record-btrace target ops. */
2826
2827 static void
2828 init_record_btrace_ops (void)
2829 {
2830 struct target_ops *ops;
2831
2832 ops = &record_btrace_ops;
2833 ops->to_shortname = "record-btrace";
2834 ops->to_longname = "Branch tracing target";
2835 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2836 ops->to_open = record_btrace_open;
2837 ops->to_close = record_btrace_close;
2838 ops->to_async = record_btrace_async;
2839 ops->to_detach = record_detach;
2840 ops->to_disconnect = record_btrace_disconnect;
2841 ops->to_mourn_inferior = record_mourn_inferior;
2842 ops->to_kill = record_kill;
2843 ops->to_stop_recording = record_btrace_stop_recording;
2844 ops->to_info_record = record_btrace_info;
2845 ops->to_insn_history = record_btrace_insn_history;
2846 ops->to_insn_history_from = record_btrace_insn_history_from;
2847 ops->to_insn_history_range = record_btrace_insn_history_range;
2848 ops->to_call_history = record_btrace_call_history;
2849 ops->to_call_history_from = record_btrace_call_history_from;
2850 ops->to_call_history_range = record_btrace_call_history_range;
2851 ops->to_record_method = record_btrace_record_method;
2852 ops->to_record_is_replaying = record_btrace_is_replaying;
2853 ops->to_record_will_replay = record_btrace_will_replay;
2854 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2855 ops->to_xfer_partial = record_btrace_xfer_partial;
2856 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2857 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2858 ops->to_fetch_registers = record_btrace_fetch_registers;
2859 ops->to_store_registers = record_btrace_store_registers;
2860 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2861 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2862 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2863 ops->to_resume = record_btrace_resume;
2864 ops->to_commit_resume = record_btrace_commit_resume;
2865 ops->to_wait = record_btrace_wait;
2866 ops->to_stop = record_btrace_stop;
2867 ops->to_update_thread_list = record_btrace_update_thread_list;
2868 ops->to_thread_alive = record_btrace_thread_alive;
2869 ops->to_goto_record_begin = record_btrace_goto_begin;
2870 ops->to_goto_record_end = record_btrace_goto_end;
2871 ops->to_goto_record = record_btrace_goto;
2872 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2873 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2874 ops->to_supports_stopped_by_sw_breakpoint
2875 = record_btrace_supports_stopped_by_sw_breakpoint;
2876 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2877 ops->to_supports_stopped_by_hw_breakpoint
2878 = record_btrace_supports_stopped_by_hw_breakpoint;
2879 ops->to_execution_direction = record_btrace_execution_direction;
2880 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2881 ops->to_done_generating_core = record_btrace_done_generating_core;
2882 ops->to_stratum = record_stratum;
2883 ops->to_magic = OPS_MAGIC;
2884 }
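
/* The ops above only take effect once the target is pushed, either
   directly via "target record-btrace" or through the "record btrace"
   convenience commands defined below.  */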
2885
2886 /* Start recording in BTS format. */
2887
2888 static void
2889 cmd_record_btrace_bts_start (const char *args, int from_tty)
2890 {
2891 if (args != NULL && *args != 0)
2892 error (_("Invalid argument."));
2893
2894 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895
2896 TRY
2897 {
2898 execute_command ("target record-btrace", from_tty);
2899 }
2900 CATCH (exception, RETURN_MASK_ALL)
2901 {
2902 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2903 throw_exception (exception);
2904 }
2905 END_CATCH
2906 }
2907
2908 /* Start recording in Intel Processor Trace format. */
2909
2910 static void
2911 cmd_record_btrace_pt_start (const char *args, int from_tty)
2912 {
2913 if (args != NULL && *args != 0)
2914 error (_("Invalid argument."));
2915
2916 record_btrace_conf.format = BTRACE_FORMAT_PT;
2917
2918 TRY
2919 {
2920 execute_command ("target record-btrace", from_tty);
2921 }
2922 CATCH (exception, RETURN_MASK_ALL)
2923 {
2924 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2925 throw_exception (exception);
2926 }
2927 END_CATCH
2928 }
2929
2930 /* Start recording, trying Intel PT first and falling back to BTS. */
2931
2932 static void
2933 cmd_record_btrace_start (const char *args, int from_tty)
2934 {
2935 if (args != NULL && *args != 0)
2936 error (_("Invalid argument."));
2937
2938 record_btrace_conf.format = BTRACE_FORMAT_PT;
2939
2940 TRY
2941 {
2942 execute_command ("target record-btrace", from_tty);
2943 }
2944 CATCH (exception, RETURN_MASK_ALL)
2945 {
2946 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2947
2948 TRY
2949 {
2950 execute_command ("target record-btrace", from_tty);
2951 }
2952 CATCH (exception, RETURN_MASK_ALL)
2953 {
2954 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2955 throw_exception (exception);
2956 }
2957 END_CATCH
2958 }
2959 END_CATCH
2960 }
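
/* Illustrative example: "record btrace" without a format argument prefers
   Intel PT and silently falls back to BTS.

     (gdb) record btrace      # tries BTRACE_FORMAT_PT, then BTRACE_FORMAT_BTS
     (gdb) info record        # shows the format actually in use

   If neither format is supported, the error from the BTS attempt is
   re-thrown to the user.  */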
2961
2962 /* The "set record btrace" command. */
2963
2964 static void
2965 cmd_set_record_btrace (const char *args, int from_tty)
2966 {
2967 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2968 }
2969
2970 /* The "show record btrace" command. */
2971
2972 static void
2973 cmd_show_record_btrace (const char *args, int from_tty)
2974 {
2975 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2976 }
2977
2978 /* The "show record btrace replay-memory-access" command. */
2979
2980 static void
2981 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2982 struct cmd_list_element *c, const char *value)
2983 {
2984 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2985 replay_memory_access);
2986 }
2987
2988 /* The "set record btrace bts" command. */
2989
2990 static void
2991 cmd_set_record_btrace_bts (const char *args, int from_tty)
2992 {
2993 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2994 "by an appropriate subcommand.\n"));
2995 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2996 all_commands, gdb_stdout);
2997 }
2998
2999 /* The "show record btrace bts" command. */
3000
3001 static void
3002 cmd_show_record_btrace_bts (const char *args, int from_tty)
3003 {
3004 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3005 }
3006
3007 /* The "set record btrace pt" command. */
3008
3009 static void
3010 cmd_set_record_btrace_pt (const char *args, int from_tty)
3011 {
3012 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3013 "by an appropriate subcommand.\n"));
3014 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3015 all_commands, gdb_stdout);
3016 }
3017
3018 /* The "show record btrace pt" command. */
3019
3020 static void
3021 cmd_show_record_btrace_pt (const char *args, int from_tty)
3022 {
3023 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3024 }
3025
3026 /* The "record bts buffer-size" show value function. */
3027
3028 static void
3029 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c,
3031 const char *value)
3032 {
3033 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3034 value);
3035 }
3036
3037 /* The "record pt buffer-size" show value function. */
3038
3039 static void
3040 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3041 struct cmd_list_element *c,
3042 const char *value)
3043 {
3044 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3045 value);
3046 }
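
/* Illustrative usage of the buffer-size settings registered below (the
   values are examples; sizes are in bytes and the actual buffer may
   differ from the request, see "info record"):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace bts buffer-size
     The record/replay bts buffer size is 131072.  */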
3047
3048 /* Initialize btrace commands. */
3049
3050 void
3051 _initialize_record_btrace (void)
3052 {
3053 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3054 _("Start branch trace recording."), &record_btrace_cmdlist,
3055 "record btrace ", 0, &record_cmdlist);
3056 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3057
3058 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3059 _("\
3060 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3061 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3062 This format may not be available on all processors."),
3063 &record_btrace_cmdlist);
3064 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3065
3066 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3067 _("\
3068 Start branch trace recording in Intel Processor Trace format.\n\n\
3069 This format may not be available on all processors."),
3070 &record_btrace_cmdlist);
3071 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3072
3073 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3074 _("Set record options"), &set_record_btrace_cmdlist,
3075 "set record btrace ", 0, &set_record_cmdlist);
3076
3077 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3078 _("Show record options"), &show_record_btrace_cmdlist,
3079 "show record btrace ", 0, &show_record_cmdlist);
3080
3081 add_setshow_enum_cmd ("replay-memory-access", no_class,
3082 replay_memory_access_types, &replay_memory_access, _("\
3083 Set what memory accesses are allowed during replay."), _("\
3084 Show what memory accesses are allowed during replay."),
3085 _("Default is READ-ONLY.\n\n\
3086 The btrace record target does not trace data.\n\
3087 The memory therefore corresponds to the live target and not \
3088 to the current replay position.\n\n\
3089 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3090 When READ-WRITE, allow accesses to read-only and read-write memory during \
3091 replay."),
3092 NULL, cmd_show_replay_memory_access,
3093 &set_record_btrace_cmdlist,
3094 &show_record_btrace_cmdlist);
3095
3096 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3097 _("Set record btrace bts options"),
3098 &set_record_btrace_bts_cmdlist,
3099 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3100
3101 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3102 _("Show record btrace bts options"),
3103 &show_record_btrace_bts_cmdlist,
3104 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3105
3106 add_setshow_uinteger_cmd ("buffer-size", no_class,
3107 &record_btrace_conf.bts.size,
3108 _("Set the record/replay bts buffer size."),
3109 _("Show the record/replay bts buffer size."), _("\
3110 When starting recording, request a trace buffer of this size. \
3111 The actual buffer size may differ from the requested size. \
3112 Use \"info record\" to see the actual buffer size.\n\n\
3113 Bigger buffers allow longer recording but also take more time to process \
3114 the recorded execution trace.\n\n\
3115 The trace buffer size may not be changed while recording."), NULL,
3116 show_record_bts_buffer_size_value,
3117 &set_record_btrace_bts_cmdlist,
3118 &show_record_btrace_bts_cmdlist);
3119
3120 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3121 _("Set record btrace pt options"),
3122 &set_record_btrace_pt_cmdlist,
3123 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3124
3125 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3126 _("Show record btrace pt options"),
3127 &show_record_btrace_pt_cmdlist,
3128 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3129
3130 add_setshow_uinteger_cmd ("buffer-size", no_class,
3131 &record_btrace_conf.pt.size,
3132 _("Set the record/replay pt buffer size."),
3133 _("Show the record/replay pt buffer size."), _("\
3134 Bigger buffers allow longer recording but also take more time to process \
3135 the recorded execution.\n\
3136 The actual buffer size may differ from the requested size. Use \"info record\" \
3137 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3138 &set_record_btrace_pt_cmdlist,
3139 &show_record_btrace_pt_cmdlist);
3140
3141 init_record_btrace_ops ();
3142 add_target (&record_btrace_ops);
3143
3144 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3145 xcalloc, xfree);
3146
3147 record_btrace_conf.bts.size = 64 * 1024;
3148 record_btrace_conf.pt.size = 16 * 1024;
3149 }