gdb/record-btrace.c

/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
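
/* For example, DEBUG ("open") prints "[record-btrace] open" followed by a
   newline to gdb_stdlog when record debugging is enabled (cf. "set debug
   record").  */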

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
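
/* For example, 16384 is adjusted to 16 with suffix "kB" and 33554432 to 32
   with suffix "MB", while 16385 is left unchanged with an empty suffix:
   only exact multiples of a power of 1024 are scaled.  */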

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
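
/* For example, adding line 42 to the empty range [0; 0) yields [42; 43),
   and adding line 40 to [42; 43) widens it to [40; 43).  */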

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
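
/* For example, FROM = 10 with SIZE = -5 requests the inclusive range
   [6; 10], and FROM = 10 with SIZE = 5 requests [10; 14]; a range whose
   upper end would overflow is clamped to ULONGEST_MAX.  */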

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
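
/* If no instruction maps to BFUN's symtab, *PBEGIN stays at INT_MAX and
   *PEND at INT_MIN, so callers can detect an empty range via END < BEGIN,
   as btrace_call_history_src_line below does.  */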

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
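
/* As a consequence, while replaying with the default "set record btrace
   replay-memory-access read-only", reads of SEC_READONLY sections (such as
   .text) are forwarded to the target beneath, whereas writes, and reads of
   writable memory, report TARGET_XFER_UNAVAILABLE.  */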

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
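
/* Both callbacks use the frame_info pointer as the key: entries are hashed
   and compared by pointer identity, so a cache entry is only valid for the
   lifetime of its frame and is removed again in
   record_btrace_frame_dealloc_cache below.  */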

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

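/* Implement the dealloc_cache method, shared below by
   record_btrace_frame_unwind and record_btrace_tailcall_frame_unwind.  */
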
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results, as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
1975 into subroutines.
1976 Since frames are computed differently when we're replaying, we need to
1977 recompute those stored frames and fix them up so we can still detect
1978 subroutines after we started replaying. */
1979 TRY
1980 {
1981 struct frame_info *frame;
1982 struct frame_id frame_id;
1983 int upd_step_frame_id, upd_step_stack_frame_id;
1984
1985 /* The current frame without replaying - computed via normal unwind. */
1986 frame = get_thread_current_frame (tp);
1987 frame_id = get_frame_id (frame);
1988
1989 /* Check if we need to update any stepping-related frame id's. */
1990 upd_step_frame_id = frame_id_eq (frame_id,
1991 tp->control.step_frame_id);
1992 upd_step_stack_frame_id = frame_id_eq (frame_id,
1993 tp->control.step_stack_frame_id);
1994
1995 /* We start replaying at the end of the branch trace. This corresponds
1996 to the current instruction. */
1997 replay = XNEW (struct btrace_insn_iterator);
1998 btrace_insn_end (replay, btinfo);
1999
2000 /* Skip gaps at the end of the trace. */
2001 while (btrace_insn_get (replay) == NULL)
2002 {
2003 unsigned int steps;
2004
2005 steps = btrace_insn_prev (replay, 1);
2006 if (steps == 0)
2007 error (_("No trace."));
2008 }
2009
2010 /* We're not replaying, yet. */
2011 gdb_assert (btinfo->replay == NULL);
2012 btinfo->replay = replay;
2013
2014 /* Make sure we're not using any stale registers. */
2015 registers_changed_ptid (tp->ptid);
2016
2017 /* The current frame with replaying - computed via btrace unwind. */
2018 frame = get_thread_current_frame (tp);
2019 frame_id = get_frame_id (frame);
2020
2021 /* Replace stepping related frames where necessary. */
2022 if (upd_step_frame_id)
2023 tp->control.step_frame_id = frame_id;
2024 if (upd_step_stack_frame_id)
2025 tp->control.step_stack_frame_id = frame_id;
2026 }
2027 CATCH (except, RETURN_MASK_ALL)
2028 {
2029 xfree (btinfo->replay);
2030 btinfo->replay = NULL;
2031
2032 registers_changed_ptid (tp->ptid);
2033
2034 throw_exception (except);
2035 }
2036 END_CATCH
2037
2038 return replay;
2039 }
2040
2041 /* Stop replaying a thread. */
2042
2043 static void
2044 record_btrace_stop_replaying (struct thread_info *tp)
2045 {
2046 struct btrace_thread_info *btinfo;
2047
2048 btinfo = &tp->btrace;
2049
2050 xfree (btinfo->replay);
2051 btinfo->replay = NULL;
2052
2053 /* Make sure we're not leaving any stale registers. */
2054 registers_changed_ptid (tp->ptid);
2055 }
2056
2057 /* Stop replaying TP if it is at the end of its execution history. */
2058
2059 static void
2060 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2061 {
2062 struct btrace_insn_iterator *replay, end;
2063 struct btrace_thread_info *btinfo;
2064
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2067
2068 if (replay == NULL)
2069 return;
2070
2071 btrace_insn_end (&end, btinfo);
2072
2073 if (btrace_insn_cmp (replay, &end) == 0)
2074 record_btrace_stop_replaying (tp);
2075 }

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
         execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          {
            if (ptid_match (tp->ptid, inferior_ptid))
              record_btrace_resume_thread (tp, flag);
            else
              record_btrace_resume_thread (tp, cflag);
          }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
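
/* To illustrate the flag selection above: a "reverse-stepi" on the
   selected thread of an all-stop target resumes that thread with
   BTHR_RSTEP and every other matching thread with BTHR_RCONT, while a
   plain "continue" resumes all matching threads with BTHR_CONT.  The
   flags only record the intent; record_btrace_wait below does the actual
   stepping.  */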

/* Cancel resuming TP.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
         print_thread_id (tp),
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Return a target_waitstatus indicating that a thread was stopped as
   requested.  */

static struct target_waitstatus
btrace_step_stopped_on_request (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_0;

  return status;
}

/* Return a target_waitstatus indicating a spurious stop.  */

static struct target_waitstatus
btrace_step_spurious (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_SPURIOUS;

  return status;
}

/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_RESUMED;

  return status;
}

/* Return a target_waitstatus indicating that we should wait again.  */

static struct target_waitstatus
btrace_step_again (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_IGNORE;

  return status;
}
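
/* The helpers above package the possible outcomes of replay stepping,
   for example:

     btrace_step_stopped ()     -> TARGET_WAITKIND_STOPPED, GDB_SIGNAL_TRAP
     btrace_step_no_history ()  -> TARGET_WAITKIND_NO_HISTORY
     btrace_step_again ()       -> TARGET_WAITKIND_IGNORE (step once more)  */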

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Check whether TP's current replay position is at a breakpoint.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
                                             &btinfo->stop_reason);
}

/* Step one instruction in forward direction.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
         of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
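
/* For example, with a recorded trace [insn1, gap, insn2, insn3] and the
   replay iterator at insn1, a forward step above skips the gap and lands
   on insn2.  Stepping again lands on insn3, the current (not yet
   executed) instruction, so the comparison against the end iterator
   reports TARGET_WAITKIND_NO_HISTORY instead of a spurious stop.  */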

/* Step one instruction in backward direction.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
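
/* Note the asymmetry with record_btrace_single_step_forward: stepping
   backwards starts replaying implicitly, whereas stepping forward while
   not replaying simply reports that we ran out of history.  */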

/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The
     to_wait method will stop the thread for which the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
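
/* For example, a thread resumed with BTHR_CONT is stepped one instruction
   per call: a spurious status re-arms BTHR_CONT and returns
   TARGET_WAITKIND_IGNORE so the caller keeps iterating, until the step
   either stops at a breakpoint or runs out of execution history.  */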

/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);

/* Announce further events if necessary.  */

static void
record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
                                      const VEC (tp_t) *no_history)
{
  int more_moving, more_no_history;

  more_moving = !VEC_empty (tp_t, moving);
  more_no_history = !VEC_empty (tp_t, no_history);

  if (!more_moving && !more_no_history)
    return;

  if (more_moving)
    DEBUG ("movers pending");

  if (more_no_history)
    DEBUG ("no-history pending");

  mark_async_event_handler (record_btrace_async_inferior_event_handler);
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
        && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
             target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
        {
          *status = record_btrace_step_thread (tp);

          switch (status->kind)
            {
            case TARGET_WAITKIND_IGNORE:
              ix++;
              break;

            case TARGET_WAITKIND_NO_HISTORY:
              VEC_safe_push (tp_t, no_history,
                             VEC_ordered_remove (tp_t, moving, ix));
              break;

            default:
              eventing = VEC_unordered_remove (tp_t, moving, ix);
              break;
            }
        }
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
         either stopped or reached the end of its execution history.

         In the former case, EVENTING must not be NULL.
         In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
         EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
         print_thread_id (eventing),
         target_pid_to_str (eventing->ptid),
         target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
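
/* To illustrate: with two threads resumed by a reverse "continue", both
   are stepped round-robin above.  A thread that reaches a breakpoint
   becomes EVENTING right away; a thread that reaches the beginning of its
   trace is parked on the NO_HISTORY list and is only reported once no
   other thread has an event left to report.  */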

/* The to_stop method of target record-btrace.  */

static void
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_stop (ops, ptid);
    }
  else
    {
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          {
            tp->btrace.flags &= ~BTHR_MOVE;
            tp->btrace.flags |= BTHR_STOP;
          }
    }
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_stopped_by_sw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is narrowed to the unsigned int used for
     instruction numbers, so a value that does not survive the round-trip
     is out of range.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
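
/* For example, "record goto 3" replays the thread at instruction number 3
   of its execution history; "record goto begin" and "record goto end" map
   to the two methods above.  */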

/* The to_record_stop_replaying method of target record-btrace.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}

/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}

/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}

/* The "record btrace" command: start recording, trying the PT format first
   and falling back to BTS if PT is not available.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
        {
          execute_command ("target record-btrace", from_tty);
        }
      CATCH (exception, RETURN_MASK_ALL)
        {
          record_btrace_conf.format = BTRACE_FORMAT_NONE;
          throw_exception (exception);
        }
      END_CATCH
    }
  END_CATCH
}

/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
                    replay_memory_access);
}

/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
                       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
             all_commands, gdb_stdout);
}

/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}

/* The "set record btrace pt" command.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
                       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
             all_commands, gdb_stdout);
}

/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}

/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
                                   struct cmd_list_element *c,
                                   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
                    value);
}

/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
                                  struct cmd_list_element *c,
                                  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
                    value);
}

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
                  _("Start branch trace recording."), &record_btrace_cmdlist,
                  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
           _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
           _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options"), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options"), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                        _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                        NULL, cmd_show_replay_memory_access,
                        &set_record_btrace_cmdlist,
                        &show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
                  _("Set record btrace bts options"),
                  &set_record_btrace_bts_cmdlist,
                  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
                  _("Show record btrace bts options"),
                  &show_record_btrace_bts_cmdlist,
                  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.bts.size,
                            _("Set the record/replay bts buffer size."),
                            _("Show the record/replay bts buffer size."), _("\
When starting recording, request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
                            show_record_bts_buffer_size_value,
                            &set_record_btrace_bts_cmdlist,
                            &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
                  _("Set record btrace pt options"),
                  &set_record_btrace_pt_cmdlist,
                  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
                  _("Show record btrace pt options"),
                  &show_record_btrace_pt_cmdlist,
                  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.pt.size,
                            _("Set the record/replay pt buffer size."),
                            _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
                            &set_record_btrace_pt_cmdlist,
                            &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
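
/* A typical session using the commands registered above (assuming the
   processor supports the chosen trace format):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) next
     (gdb) reverse-stepi
     (gdb) record goto begin
     (gdb) info record  */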