gdb/record-full: Use xmalloc instead of alloca for temporary memory storage.
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   MSG is a printf-style format string; ARGS are its arguments.  Output is
   only produced when "set debug record" is non-zero.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
100
101/* Update the branch trace for the current thread and return a pointer to its
066ce621 102 thread_info.
afedecd3
MM
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
066ce621
MM
107static struct thread_info *
108require_btrace_thread (void)
afedecd3
MM
109{
110 struct thread_info *tp;
afedecd3
MM
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
6e07b1d2 120 if (btrace_is_empty (tp))
afedecd3
MM
121 error (_("No trace."));
122
066ce621
MM
123 return tp;
124}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
/* Enable branch tracing for one thread.  Warn on errors.

   TP is the thread to trace.  This is installed as a new-thread observer
   (see record_btrace_auto_enable), so it must not throw; any error raised
   by btrace_enable is downgraded to a warning.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Report but do not propagate: a failure to enable tracing for one
	 new thread should not abort whatever triggered the observer.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
157
/* Callback function to disable branch tracing for one thread.

   ARG is really a struct thread_info *; the void * signature matches the
   cleanup-callback interface.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
167
168/* Enable automatic tracing of new threads. */
169
170static void
171record_btrace_auto_enable (void)
172{
173 DEBUG ("attach thread observer");
174
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn);
177}
178
179/* Disable automatic tracing of new threads. */
180
181static void
182record_btrace_auto_disable (void)
183{
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192}
193
/* The record-btrace async event handler function.

   DATA is unused; we simply forward to the generic inferior event
   handler.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
201
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS optionally names the threads to trace (a number list); when ARGS is
   NULL or empty, all non-exited threads are traced.  Throws an error if the
   inferior is not running.  If enabling fails partway through, tracing is
   rolled back for the threads enabled so far via the cleanup chain.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Anchor for the per-thread disable cleanups registered below.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	/* Undo the enable if a later step throws.  */
	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
241
/* The to_stop_recording method of target record-btrace.

   Stops tracing new threads first, then disables tracing for every
   non-exited thread that is currently being recorded.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only threads with an active trace target need to be disabled.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
257
/* The to_close method of target record-btrace.

   Removes the async event handler and tears down branch tracing for all
   remaining threads.  Safe to call whether or not recording was stopped
   beforehand.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
277
b7d2e916
PA
278/* The to_async method of target record-btrace. */
279
280static void
6a3753b3 281record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 282{
6a3753b3 283 if (enable)
b7d2e916
PA
284 mark_async_event_handler (record_btrace_async_inferior_event_handler);
285 else
286 clear_async_event_handler (record_btrace_async_inferior_event_handler);
287
6a3753b3 288 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
289}
290
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   *SIZE is divided by the largest power-of-two unit (GiB, MiB, KiB) that
   divides it exactly; the matching suffix is returned.  If no unit divides
   *SIZE, it is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] = { { 30, "GB" }, { 20, "MB" }, { 10, "kB" } };
  unsigned int value = *size;
  int i;

  /* Try the largest unit first so we report the coarsest exact match.  */
  for (i = 0; i < 3; ++i)
    if ((value & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = value >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
318
319/* Print a BTS configuration. */
320
321static void
322record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
323{
324 const char *suffix;
325 unsigned int size;
326
327 size = conf->size;
328 if (size > 0)
329 {
330 suffix = record_btrace_adjust_size (&size);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
332 }
333}
334
b20a6524
MM
335/* Print an Intel(R) Processor Trace configuration. */
336
337static void
338record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
339{
340 const char *suffix;
341 unsigned int size;
342
343 size = conf->size;
344 if (size > 0)
345 {
346 suffix = record_btrace_adjust_size (&size);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
348 }
349}
350
d33501a5
MM
351/* Print a branch tracing configuration. */
352
353static void
354record_btrace_print_conf (const struct btrace_config *conf)
355{
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
358
359 switch (conf->format)
360 {
361 case BTRACE_FORMAT_NONE:
362 return;
363
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
366 return;
b20a6524
MM
367
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
370 return;
d33501a5
MM
371 }
372
373 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
374}
375
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the trace configuration and a summary of the recorded trace
   (instruction count, function-call count, gap count) for the current
   thread.  Also reports the replay position if replaying.  Throws an
   error if there is no current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call is the total number of calls.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
448
31fd9caa
MM
/* Print a decode error.

   ERRCODE is interpreted according to FORMAT's decoder; unrecognized codes
   are printed as "unknown".  Some PT conditions (user quit, disabled,
   overflow) are informational rather than errors and are printed without
   the "decode error" prefix.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come straight from the libipt decoder.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
520
/* Print an unsigned int.

   Writes VAL into field FLD of UIOUT using "%u" formatting.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
528
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Build a line range covering [BEGIN; END) in SYMTAB.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range result;

  result.symtab = symtab;
  result.begin = begin;
  result.end = end;

  return result;
}

/* Widen RANGE to cover LINE and return the widened range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* The range was empty; LINE becomes its only member.  */
      range.begin = line;
      range.end = line + 1;
    }
  else
    {
      if (line < range.begin)
	range.begin = line;

      /* NOTE(review): a LINE above the current end becomes the new
	 (exclusive) end, i.e. is not itself included — presumably
	 intentional; confirm against callers.  */
      if (range.end < line)
	range.end = line;
    }

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  if (lhs.symtab != rhs.symtab)
    return 0;

  return lhs.begin <= rhs.begin && rhs.end <= lhs.end;
}
594
595/* Find the line range associated with PC. */
596
597static struct btrace_line_range
598btrace_find_line_range (CORE_ADDR pc)
599{
600 struct btrace_line_range range;
601 struct linetable_entry *lines;
602 struct linetable *ltable;
603 struct symtab *symtab;
604 int nlines, i;
605
606 symtab = find_pc_line_symtab (pc);
607 if (symtab == NULL)
608 return btrace_mk_line_range (NULL, 0, 0);
609
610 ltable = SYMTAB_LINETABLE (symtab);
611 if (ltable == NULL)
612 return btrace_mk_line_range (symtab, 0, 0);
613
614 nlines = ltable->nitems;
615 lines = ltable->item;
616 if (nlines <= 0)
617 return btrace_mk_line_range (symtab, 0, 0);
618
619 range = btrace_mk_line_range (symtab, 0, 0);
620 for (i = 0; i < nlines - 1; i++)
621 {
622 if ((lines[i].pc == pc) && (lines[i].line != 0))
623 range = btrace_line_range_add (range, lines[i].line);
624 }
625
626 return range;
627}
628
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  enum print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple opened for the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the list that this line's instructions will be added to.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
662
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   Prints the instructions in [BEGIN; END) from BTINFO's trace to UIOUT.
   When DISASSEMBLY_SOURCE is set in FLAGS, instructions are grouped under
   their source lines.  Gaps in the trace (NULL instructions) are printed
   as decode errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already covered by the previously printed range.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
	}
    }

  do_cleanups (cleanups);
}
754
/* The to_insn_history method of target record-btrace.

   Prints abs (SIZE) instructions around the current browsing position —
   or, on the first request, around the replay position (if replaying) or
   the tail of the trace.  SIZE < 0 moves backwards, SIZE > 0 forwards.
   Throws an error if SIZE is zero or there is no thread/trace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
840
/* The to_insn_history_range method of target record-btrace.

   Prints the instructions numbered FROM to TO, both inclusive.  Throws an
   error for wrapped-around or inverted ranges or if FROM lies outside the
   recorded trace; a TO beyond the trace is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);

  /* Remember the printed range for subsequent +/- requests.  */
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
893
/* The to_insn_history_from method of target record-btrace.

   Prints abs (SIZE) instructions ending at (SIZE < 0) or starting at
   (SIZE > 0) instruction number FROM, clamping the range at zero and
   ULONGEST_MAX.  Throws an error if SIZE is zero.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
927
928/* Print the instruction number range for a function call history line. */
929
930static void
23a7fe75
MM
931btrace_call_history_insn_range (struct ui_out *uiout,
932 const struct btrace_function *bfun)
afedecd3 933{
7acbe133
MM
934 unsigned int begin, end, size;
935
936 size = VEC_length (btrace_insn_s, bfun->insn);
937 gdb_assert (size > 0);
afedecd3 938
23a7fe75 939 begin = bfun->insn_offset;
7acbe133 940 end = begin + size - 1;
afedecd3 941
23a7fe75 942 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 943 ui_out_text (uiout, ",");
23a7fe75 944 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
945}
946
ce0dfbea
MM
947/* Compute the lowest and highest source line for the instructions in BFUN
948 and return them in PBEGIN and PEND.
949 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
950 result from inlining or macro expansion. */
951
952static void
953btrace_compute_src_line_range (const struct btrace_function *bfun,
954 int *pbegin, int *pend)
955{
956 struct btrace_insn *insn;
957 struct symtab *symtab;
958 struct symbol *sym;
959 unsigned int idx;
960 int begin, end;
961
962 begin = INT_MAX;
963 end = INT_MIN;
964
965 sym = bfun->sym;
966 if (sym == NULL)
967 goto out;
968
969 symtab = symbol_symtab (sym);
970
971 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
972 {
973 struct symtab_and_line sal;
974
975 sal = find_pc_line (insn->pc, 0);
976 if (sal.symtab != symtab || sal.line == 0)
977 continue;
978
979 begin = min (begin, sal.line);
980 end = max (end, sal.line);
981 }
982
983 out:
984 *pbegin = begin;
985 *pend = end;
986}
987
afedecd3
MM
/* Print the source line information for a function call history line.

   Prints "file[:min[,max]]" for BFUN, using the line range computed by
   btrace_compute_src_line_range.  Prints nothing if BFUN has no debug
   symbol; omits the line part if the computed range is empty, and the
   max part if the range covers a single line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* An inverted range means no line information was found.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
1017
0b722aec
MM
1018/* Get the name of a branch trace function. */
1019
1020static const char *
1021btrace_get_bfun_name (const struct btrace_function *bfun)
1022{
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 if (bfun == NULL)
1027 return "??";
1028
1029 msym = bfun->msym;
1030 sym = bfun->sym;
1031
1032 if (sym != NULL)
1033 return SYMBOL_PRINT_NAME (sym);
1034 else if (msym != NULL)
efd66ac6 1035 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1036 else
1037 return "??";
1038}
1039
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per call in [BEGIN; END) to UIOUT: the call index,
   optional call-depth indentation (RECORD_PRINT_INDENT_CALLS), the
   function name, and optional instruction-number (RECORD_PRINT_INSN_RANGE)
   and source-line (RECORD_PRINT_SRC_LINE) ranges.  Gaps in the trace are
   printed as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth relative to the trace's base level.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
1113
/* The to_call_history method of target record-btrace.

   Print SIZE function-call-history entries, continuing from the previously
   browsed position (or from the replay/tail position on the first request).
   A negative SIZE browses backwards.  FLAGS are RECORD_PRINT_* flags.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id says "insn history" although this prints the
     call history; MI consumers may rely on the current id -- confirm before
     changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously browsed [begin; end) window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* The iterators did not move; tell the user which end was hit.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window so the next request continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1202
/* The to_call_history_range method of target record-btrace.

   Print the function-call history for the inclusive range [FROM; TO].
   Errors out if the range wraps, is inverted, or starts out of bounds;
   a TO beyond the trace is silently truncated to the trace end.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  /* Call numbers are unsigned int; narrowing may truncate.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1255
1256/* The to_call_history_from method of target record-btrace. */
1257
1258static void
ec0aea04
TT
1259record_btrace_call_history_from (struct target_ops *self,
1260 ULONGEST from, int size, int flags)
afedecd3
MM
1261{
1262 ULONGEST begin, end, context;
1263
1264 context = abs (size);
0688d04e
MM
1265 if (context == 0)
1266 error (_("Bad record function-call-history-size."));
afedecd3
MM
1267
1268 if (size < 0)
1269 {
1270 end = from;
1271
1272 if (from < context)
1273 begin = 0;
1274 else
0688d04e 1275 begin = from - context + 1;
afedecd3
MM
1276 }
1277 else
1278 {
1279 begin = from;
0688d04e 1280 end = from + context - 1;
afedecd3
MM
1281
1282 /* Check for wrap-around. */
1283 if (end < begin)
1284 end = ULONGEST_MAX;
1285 }
1286
f0d960ea 1287 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1288}
1289
07bbe694
MM
1290/* The to_record_is_replaying method of target record-btrace. */
1291
1292static int
a52eab48 1293record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1294{
1295 struct thread_info *tp;
1296
034f788c 1297 ALL_NON_EXITED_THREADS (tp)
a52eab48 1298 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1299 return 1;
1300
1301 return 0;
1302}
1303
7ff27e9b
MM
1304/* The to_record_will_replay method of target record-btrace. */
1305
1306static int
1307record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1308{
1309 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1310}
1311
633785ff
MM
1312/* The to_xfer_partial method of target record-btrace. */
1313
9b409511 1314static enum target_xfer_status
633785ff
MM
1315record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1316 const char *annex, gdb_byte *readbuf,
1317 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1318 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1319{
1320 struct target_ops *t;
1321
1322 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1323 if (replay_memory_access == replay_memory_access_read_only
aef92902 1324 && !record_btrace_generating_corefile
4d10e986 1325 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1326 {
1327 switch (object)
1328 {
1329 case TARGET_OBJECT_MEMORY:
1330 {
1331 struct target_section *section;
1332
1333 /* We do not allow writing memory in general. */
1334 if (writebuf != NULL)
9b409511
YQ
1335 {
1336 *xfered_len = len;
bc113b4e 1337 return TARGET_XFER_UNAVAILABLE;
9b409511 1338 }
633785ff
MM
1339
1340 /* We allow reading readonly memory. */
1341 section = target_section_by_addr (ops, offset);
1342 if (section != NULL)
1343 {
1344 /* Check if the section we found is readonly. */
1345 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1346 section->the_bfd_section)
1347 & SEC_READONLY) != 0)
1348 {
1349 /* Truncate the request to fit into this section. */
1350 len = min (len, section->endaddr - offset);
1351 break;
1352 }
1353 }
1354
9b409511 1355 *xfered_len = len;
bc113b4e 1356 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1357 }
1358 }
1359 }
1360
1361 /* Forward the request. */
e75fdfca
TT
1362 ops = ops->beneath;
1363 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1364 offset, len, xfered_len);
633785ff
MM
1365}
1366
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the read-only replay memory-access restriction so
   the target beneath can patch memory, and restores the previous setting
   on both the normal and the exception path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1397
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: allow read-write
   replay memory access while the target beneath removes the breakpoint,
   restoring the previous setting on both exit paths.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1428
1f3ef581
MM
1429/* The to_fetch_registers method of target record-btrace. */
1430
1431static void
1432record_btrace_fetch_registers (struct target_ops *ops,
1433 struct regcache *regcache, int regno)
1434{
1435 struct btrace_insn_iterator *replay;
1436 struct thread_info *tp;
1437
1438 tp = find_thread_ptid (inferior_ptid);
1439 gdb_assert (tp != NULL);
1440
1441 replay = tp->btrace.replay;
aef92902 1442 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1443 {
1444 const struct btrace_insn *insn;
1445 struct gdbarch *gdbarch;
1446 int pcreg;
1447
1448 gdbarch = get_regcache_arch (regcache);
1449 pcreg = gdbarch_pc_regnum (gdbarch);
1450 if (pcreg < 0)
1451 return;
1452
1453 /* We can only provide the PC register. */
1454 if (regno >= 0 && regno != pcreg)
1455 return;
1456
1457 insn = btrace_insn_get (replay);
1458 gdb_assert (insn != NULL);
1459
1460 regcache_raw_supply (regcache, regno, &insn->pc);
1461 }
1462 else
1463 {
e75fdfca 1464 struct target_ops *t = ops->beneath;
1f3ef581 1465
e75fdfca 1466 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1467 }
1468}
1469
1470/* The to_store_registers method of target record-btrace. */
1471
1472static void
1473record_btrace_store_registers (struct target_ops *ops,
1474 struct regcache *regcache, int regno)
1475{
1476 struct target_ops *t;
1477
a52eab48 1478 if (!record_btrace_generating_corefile
4d10e986
MM
1479 && record_btrace_is_replaying (ops, inferior_ptid))
1480 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1481
1482 gdb_assert (may_write_registers != 0);
1483
e75fdfca
TT
1484 t = ops->beneath;
1485 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1486}
1487
1488/* The to_prepare_to_store method of target record-btrace. */
1489
1490static void
1491record_btrace_prepare_to_store (struct target_ops *ops,
1492 struct regcache *regcache)
1493{
1494 struct target_ops *t;
1495
a52eab48 1496 if (!record_btrace_generating_corefile
4d10e986 1497 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1498 return;
1499
e75fdfca
TT
1500 t = ops->beneath;
1501 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1502}
1503
/* The branch trace frame cache.

   One instance per frame claimed by the btrace unwinders; instances are
   obstack-allocated per frame and registered in the BFCACHE hash table,
   keyed by FRAME.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Hash key of BFCACHE.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1517
1518/* A struct btrace_frame_cache hash table indexed by NEXT. */
1519
1520static htab_t bfcache;
1521
1522/* hash_f for htab_create_alloc of bfcache. */
1523
1524static hashval_t
1525bfcache_hash (const void *arg)
1526{
19ba03f4
SM
1527 const struct btrace_frame_cache *cache
1528 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1529
1530 return htab_hash_pointer (cache->frame);
1531}
1532
1533/* eq_f for htab_create_alloc of bfcache. */
1534
1535static int
1536bfcache_eq (const void *arg1, const void *arg2)
1537{
19ba03f4
SM
1538 const struct btrace_frame_cache *cache1
1539 = (const struct btrace_frame_cache *) arg1;
1540 const struct btrace_frame_cache *cache2
1541 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1542
1543 return cache1->frame == cache2->frame;
1544}
1545
1546/* Create a new btrace frame cache. */
1547
1548static struct btrace_frame_cache *
1549bfcache_new (struct frame_info *frame)
1550{
1551 struct btrace_frame_cache *cache;
1552 void **slot;
1553
1554 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1555 cache->frame = frame;
1556
1557 slot = htab_find_slot (bfcache, cache, INSERT);
1558 gdb_assert (*slot == NULL);
1559 *slot = cache;
1560
1561 return cache;
1562}
1563
1564/* Extract the branch trace function from a branch trace frame. */
1565
1566static const struct btrace_function *
1567btrace_get_frame_function (struct frame_info *frame)
1568{
1569 const struct btrace_frame_cache *cache;
1570 const struct btrace_function *bfun;
1571 struct btrace_frame_cache pattern;
1572 void **slot;
1573
1574 pattern.frame = frame;
1575
1576 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1577 if (slot == NULL)
1578 return NULL;
1579
19ba03f4 1580 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1581 return cache->bfun;
1582}
1583
cecac1ab
MM
1584/* Implement stop_reason method for record_btrace_frame_unwind. */
1585
1586static enum unwind_stop_reason
1587record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1588 void **this_cache)
1589{
0b722aec
MM
1590 const struct btrace_frame_cache *cache;
1591 const struct btrace_function *bfun;
1592
19ba03f4 1593 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1594 bfun = cache->bfun;
1595 gdb_assert (bfun != NULL);
1596
1597 if (bfun->up == NULL)
1598 return UNWIND_UNAVAILABLE;
1599
1600 return UNWIND_NO_REASON;
cecac1ab
MM
1601}
1602
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an "unavailable stack" frame id from the frame's function start
   address and the number of the function's first trace segment.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance so that all
     segments of the same instance yield the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1631
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC of the caller can be unwound from branch trace; requests
   for any other register throw NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up-link points at the return point: the PC is the first
	 instruction of the caller segment we return to.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up-link points at the call: the unwound PC is the
	 instruction following the call in the caller segment.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1680
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the sentinel frame when the current thread is replaying (using
   the replay position's function segment), and any other frame whose
   next (inner) frame is a btrace frame with a non-tailcall up-link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up-link, unless it marks a
	 tail call (handled by the tailcall sniffer instead).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1730
1731/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1732
1733static int
1734record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1735 struct frame_info *this_frame,
1736 void **this_cache)
1737{
1738 const struct btrace_function *bfun, *callee;
1739 struct btrace_frame_cache *cache;
1740 struct frame_info *next;
1741
1742 next = get_next_frame (this_frame);
1743 if (next == NULL)
1744 return 0;
1745
1746 callee = btrace_get_frame_function (next);
1747 if (callee == NULL)
1748 return 0;
1749
1750 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1751 return 0;
1752
1753 bfun = callee->up;
1754 if (bfun == NULL)
1755 return 0;
1756
1757 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1758 btrace_get_bfun_name (bfun), bfun->level);
1759
1760 /* This is our frame. Initialize the frame cache. */
1761 cache = bfcache_new (this_frame);
1762 cache->tp = find_thread_ptid (inferior_ptid);
1763 cache->bfun = bfun;
1764
1765 *this_cache = cache;
1766 return 1;
1767}
1768
1769static void
1770record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1771{
1772 struct btrace_frame_cache *cache;
1773 void **slot;
1774
19ba03f4 1775 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1776
1777 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1778 gdb_assert (slot != NULL);
1779
1780 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1781}
1782
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1799
/* Like record_btrace_frame_unwind, but for frames reached via a recorded
   tail call (see record_btrace_tailcall_frame_sniffer).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1810
ac01945b
TT
1811/* Implement the to_get_unwinder method. */
1812
1813static const struct frame_unwind *
1814record_btrace_to_get_unwinder (struct target_ops *self)
1815{
1816 return &record_btrace_frame_unwind;
1817}
1818
1819/* Implement the to_get_tailcall_unwinder method. */
1820
1821static const struct frame_unwind *
1822record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1823{
1824 return &record_btrace_tailcall_frame_unwind;
1825}
1826
987e68b1
MM
1827/* Return a human-readable string for FLAG. */
1828
1829static const char *
1830btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1831{
1832 switch (flag)
1833 {
1834 case BTHR_STEP:
1835 return "step";
1836
1837 case BTHR_RSTEP:
1838 return "reverse-step";
1839
1840 case BTHR_CONT:
1841 return "cont";
1842
1843 case BTHR_RCONT:
1844 return "reverse-cont";
1845
1846 case BTHR_STOP:
1847 return "stop";
1848 }
1849
1850 return "<invalid>";
1851}
1852
52834460
MM
1853/* Indicate that TP should be resumed according to FLAG. */
1854
1855static void
1856record_btrace_resume_thread (struct thread_info *tp,
1857 enum btrace_thread_flag flag)
1858{
1859 struct btrace_thread_info *btinfo;
1860
987e68b1
MM
1861 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1862 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1863
1864 btinfo = &tp->btrace;
1865
52834460
MM
1866 /* Fetch the latest branch trace. */
1867 btrace_fetch (tp);
1868
0ca912df
MM
1869 /* A resume request overwrites a preceding resume or stop request. */
1870 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1871 btinfo->flags |= flag;
1872}
1873
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears TP's executing
   flag so get_current_frame may compute a frame; both are restored on
   all exit paths.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1922
/* Start replaying a thread.

   Returns the new replay iterator (positioned at the current, i.e. last,
   traced instruction), or NULL if TP has no trace.  On error, all state
   changed here is rolled back and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Roll back: free the iterator, detach it from BTINFO, and discard
	 any registers computed from the replay position.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2004
2005/* Stop replaying a thread. */
2006
2007static void
2008record_btrace_stop_replaying (struct thread_info *tp)
2009{
2010 struct btrace_thread_info *btinfo;
2011
2012 btinfo = &tp->btrace;
2013
2014 xfree (btinfo->replay);
2015 btinfo->replay = NULL;
2016
2017 /* Make sure we're not leaving any stale registers. */
2018 registers_changed_ptid (tp->ptid);
2019}
2020
e3cfc1c7
MM
2021/* Stop replaying TP if it is at the end of its execution history. */
2022
2023static void
2024record_btrace_stop_replaying_at_end (struct thread_info *tp)
2025{
2026 struct btrace_insn_iterator *replay, end;
2027 struct btrace_thread_info *btinfo;
2028
2029 btinfo = &tp->btrace;
2030 replay = btinfo->replay;
2031
2032 if (replay == NULL)
2033 return;
2034
2035 btrace_insn_end (&end, btinfo);
2036
2037 if (btrace_insn_cmp (replay, &end) == 0)
2038 record_btrace_stop_replaying (tp);
2039}
2040
/* The to_resume method of target record-btrace.

   When not replaying (and moving forward), the request is forwarded to
   the target beneath.  Otherwise we only record the resume intent in the
   matching threads' btrace flags; the actual stepping happens in
   record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for
     the resumed thread(s), CFLAG for threads that merely continue.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2116
987e68b1
MM
2117/* Cancel resuming TP. */
2118
2119static void
2120record_btrace_cancel_resume (struct thread_info *tp)
2121{
2122 enum btrace_thread_flag flags;
2123
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2125 if (flags == 0)
2126 return;
2127
2128 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
2129 target_pid_to_str (tp->ptid), flags,
2130 btrace_thread_flag_to_str (flags));
2131
2132 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2133 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2134}
2135
2136/* Return a target_waitstatus indicating that we ran out of history. */
2137
2138static struct target_waitstatus
2139btrace_step_no_history (void)
2140{
2141 struct target_waitstatus status;
2142
2143 status.kind = TARGET_WAITKIND_NO_HISTORY;
2144
2145 return status;
2146}
2147
2148/* Return a target_waitstatus indicating that a step finished. */
2149
2150static struct target_waitstatus
2151btrace_step_stopped (void)
2152{
2153 struct target_waitstatus status;
2154
2155 status.kind = TARGET_WAITKIND_STOPPED;
2156 status.value.sig = GDB_SIGNAL_TRAP;
2157
2158 return status;
2159}
2160
6e4879f0
MM
2161/* Return a target_waitstatus indicating that a thread was stopped as
2162 requested. */
2163
2164static struct target_waitstatus
2165btrace_step_stopped_on_request (void)
2166{
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_STOPPED;
2170 status.value.sig = GDB_SIGNAL_0;
2171
2172 return status;
2173}
2174
d825d248
MM
2175/* Return a target_waitstatus indicating a spurious stop. */
2176
2177static struct target_waitstatus
2178btrace_step_spurious (void)
2179{
2180 struct target_waitstatus status;
2181
2182 status.kind = TARGET_WAITKIND_SPURIOUS;
2183
2184 return status;
2185}
2186
e3cfc1c7
MM
2187/* Return a target_waitstatus indicating that the thread was not resumed. */
2188
2189static struct target_waitstatus
2190btrace_step_no_resumed (void)
2191{
2192 struct target_waitstatus status;
2193
2194 status.kind = TARGET_WAITKIND_NO_RESUMED;
2195
2196 return status;
2197}
2198
2199/* Return a target_waitstatus indicating that we should wait again. */
2200
2201static struct target_waitstatus
2202btrace_step_again (void)
2203{
2204 struct target_waitstatus status;
2205
2206 status.kind = TARGET_WAITKIND_IGNORE;
2207
2208 return status;
2209}
2210
52834460
MM
2211/* Clear the record histories. */
2212
2213static void
2214record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2215{
2216 xfree (btinfo->insn_history);
2217 xfree (btinfo->call_history);
2218
2219 btinfo->insn_history = NULL;
2220 btinfo->call_history = NULL;
2221}
2222
3c615f99
MM
2223/* Check whether TP's current replay position is at a breakpoint. */
2224
2225static int
2226record_btrace_replay_at_breakpoint (struct thread_info *tp)
2227{
2228 struct btrace_insn_iterator *replay;
2229 struct btrace_thread_info *btinfo;
2230 const struct btrace_insn *insn;
2231 struct inferior *inf;
2232
2233 btinfo = &tp->btrace;
2234 replay = btinfo->replay;
2235
2236 if (replay == NULL)
2237 return 0;
2238
2239 insn = btrace_insn_get (replay);
2240 if (insn == NULL)
2241 return 0;
2242
2243 inf = find_inferior_ptid (tp->ptid);
2244 if (inf == NULL)
2245 return 0;
2246
2247 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2248 &btinfo->stop_reason);
2249}
2250
/* Step one instruction in forward direction.

   Returns SPURIOUS if the step succeeded, NO_HISTORY if we stepped past the
   end of the recorded execution, and STOPPED if we stepped onto a
   breakpoint.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  For forward stepping, PC points
     at the next to-be-executed instruction, so check before moving.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2294
/* Step one instruction in backward direction.

   Starts replaying if necessary.  Returns SPURIOUS on success, NO_HISTORY
   when the beginning of the recorded execution is reached, and STOPPED when
   the de-executed instruction is at a breakpoint.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2336
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE/BTHR_STOP request and performs one unit
   of work for it.  Returns IGNORE if the thread should be stepped again
   (continue requests), NO_HISTORY if it ran out of execution history (in
   which case the request is re-armed so to_wait can delay the report), or
   a final stop status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending move/stop request up front; it is re-added
     below when the thread should keep moving.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A single step either completes (reported as a stop) or leaves the
	 SPURIOUS/NO_HISTORY status to be handled below.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continuing means stepping until something interesting happens;
	 re-arm the request and ask to be called again.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2405
/* A vector of threads.  Used by record_btrace_wait to keep work lists of
   moving and out-of-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2410
a6b5be76
MM
2411/* Announce further events if necessary. */
2412
2413static void
2414record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2415 const VEC (tp_t) *no_history)
2416{
2417 int more_moving, more_no_history;
2418
2419 more_moving = !VEC_empty (tp_t, moving);
2420 more_no_history = !VEC_empty (tp_t, no_history);
2421
2422 if (!more_moving && !more_no_history)
2423 return;
2424
2425 if (more_moving)
2426 DEBUG ("movers pending");
2427
2428 if (more_no_history)
2429 DEBUG ("no-history pending");
2430
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2432}
2433
/* The to_wait method of target record-btrace.

   While replaying (or preparing to reverse-execute), steps all resumed
   threads in a round-robin fashion until one of them reports an event,
   delaying "no execution history" reports until nothing else is left to
   report.  Otherwise forwards the request to the target beneath.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move/stop request - report NO_RESUMED.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread wants another step; leave it on the work list.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the delayed-report list.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A reportable event - stop stepping.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2567
6e4879f0
MM
2568/* The to_stop method of target record-btrace. */
2569
2570static void
2571record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2572{
2573 DEBUG ("stop %s", target_pid_to_str (ptid));
2574
2575 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2576 if ((execution_direction != EXEC_REVERSE)
2577 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2578 {
2579 ops = ops->beneath;
2580 ops->to_stop (ops, ptid);
2581 }
2582 else
2583 {
2584 struct thread_info *tp;
2585
2586 ALL_NON_EXITED_THREADS (tp)
2587 if (ptid_match (tp->ptid, ptid))
2588 {
2589 tp->btrace.flags &= ~BTHR_MOVE;
2590 tp->btrace.flags |= BTHR_STOP;
2591 }
2592 }
2593 }
2594
/* The to_can_execute_reverse method of target record-btrace.
   Branch tracing always allows reverse execution over the recorded
   history.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2602
9e8915c6 2603/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2604
9e8915c6
PA
2605static int
2606record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2607{
a52eab48 2608 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2609 {
2610 struct thread_info *tp = inferior_thread ();
2611
2612 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2613 }
2614
2615 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2616}
2617
2618/* The to_supports_stopped_by_sw_breakpoint method of target
2619 record-btrace. */
2620
2621static int
2622record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2623{
a52eab48 2624 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2625 return 1;
2626
2627 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2628}
2629
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* During replay, report the stop reason recorded for the current
	 thread.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* Not replaying: defer to the target beneath.  */
  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2644
2645/* The to_supports_stopped_by_hw_breakpoint method of target
2646 record-btrace. */
2647
2648static int
2649record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2650{
a52eab48 2651 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2652 return 1;
52834460 2653
9e8915c6 2654 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2655}
2656
/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}
2670
2671/* The to_thread_alive method of target record-btrace. */
2672
2673static int
2674record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2675{
2676 /* We don't add or remove threads during replay. */
a52eab48 2677 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2678 return find_thread_ptid (ptid) != NULL;
2679
2680 /* Forward the request. */
e75fdfca
TT
2681 ops = ops->beneath;
2682 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2683}
2684
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the record histories, updates STOP_PC, and prints the new frame;
   registers are invalidated so they get re-fetched at the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position - nothing to do.  */
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2715
2716/* The to_goto_record_begin method of target record-btrace. */
2717
2718static void
08475817 2719record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2720{
2721 struct thread_info *tp;
2722 struct btrace_insn_iterator begin;
2723
2724 tp = require_btrace_thread ();
2725
2726 btrace_insn_begin (&begin, &tp->btrace);
2727 record_btrace_set_replay (tp, &begin);
066ce621
MM
2728}
2729
2730/* The to_goto_record_end method of target record-btrace. */
2731
2732static void
307a1b91 2733record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2734{
2735 struct thread_info *tp;
2736
2737 tp = require_btrace_thread ();
2738
2739 record_btrace_set_replay (tp, NULL);
066ce621
MM
2740}
2741
2742/* The to_goto_record method of target record-btrace. */
2743
2744static void
606183ac 2745record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2746{
2747 struct thread_info *tp;
2748 struct btrace_insn_iterator it;
2749 unsigned int number;
2750 int found;
2751
2752 number = insn;
2753
2754 /* Check for wrap-arounds. */
2755 if (number != insn)
2756 error (_("Instruction number out of range."));
2757
2758 tp = require_btrace_thread ();
2759
2760 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2761 if (found == 0)
2762 error (_("No such instruction."));
2763
2764 record_btrace_set_replay (tp, &it);
066ce621
MM
2765}
2766
/* The to_record_stop_replaying method of target record-btrace.
   Stops replaying for every live thread.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2777
/* The to_execution_direction target method.  Reports the direction the
   last resume request was issued in.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2785
/* The to_prepare_to_generate_core target method.  Set a flag so memory
   accesses during core generation bypass replay restrictions.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2793
/* The to_done_generating_core target method.  Clear the flag set by
   record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2801
/* Initialize the record-btrace target ops.  Fills in the static
   record_btrace_ops structure with this target's method implementations;
   methods not set here are inherited from the target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2860
/* Start recording in BTS format.  On failure to push the target, the
   configured format is reset before re-throwing the error.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later plain "record btrace" is not
	 affected, then propagate the error.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2882
/* Start recording Intel(R) Processor Trace.  On failure to push the
   target, the configured format is reset before re-throwing the error.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection, then propagate the error.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2904
/* Alias for "target record".  Tries the PT format first and falls back to
   BTS if pushing the target in PT format fails.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT is not available - retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; reset and propagate the error.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2936
/* The "set record btrace" command.  With no subcommand, list the current
   settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2944
/* The "show record btrace" command.  Lists all record btrace settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2952
2953/* The "show record btrace replay-memory-access" command. */
2954
2955static void
2956cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2957 struct cmd_list_element *c, const char *value)
2958{
2959 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2960 replay_memory_access);
2961}
2962
/* The "set record btrace bts" command.  Prints usage and the list of
   subcommands; this prefix does nothing on its own.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2973
/* The "show record btrace bts" command.  Lists all BTS settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2981
/* The "set record btrace pt" command.  Prints usage and the list of
   subcommands; this prefix does nothing on its own.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2992
/* The "show record btrace pt" command.  Lists all PT settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3000
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3011
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3022
void _initialize_record_btrace (void);

/* Initialize btrace commands.  Registers the "record btrace" command tree,
   its set/show options, the record-btrace target, the frame cache, and the
   default buffer sizes.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its "record b" alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Format-specific start commands: "record btrace bts" / "record bts".  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" / "record pt".  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" option prefixes.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  /* "set/show record btrace bts buffer-size".  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" option prefixes.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  /* "set/show record btrace pt buffer-size".  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for the branch trace frame unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.720839 seconds and 4 git commands to generate.