/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
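/* Illustrative usage sketch (assuming the enum setting is registered via
   add_setshow_enum_cmd later in this file):

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   With the default "read-only" policy, memory writes are refused while
   replaying; see record_btrace_xfer_partial below.  */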

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                            \
  do                                                                   \
    {                                                                  \
      if (record_debug != 0)                                           \
        fprintf_unfiltered (gdb_stdlog,                                \
                            "[record-btrace] " msg "\n", ##args);      \
    }                                                                  \
  while (0)
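/* Sketch of why the do ... while (0) wrapper matters: it lets the macro be
   used like a single statement, e.g.

     if (have_trace)
       DEBUG ("fetched %u instructions", insns);
     else
       DEBUG ("no trace");

   With a bare braced block instead, the trailing semicolon would terminate
   the if statement early and the else branch would fail to parse.  */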

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
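
/* A minimal usage sketch for record_btrace_adjust_size (hypothetical values):

     unsigned int size = 2u << 20;   /* 2097152 bytes.  */
     const char *suffix = record_btrace_adjust_size (&size);
     /* Now size == 2 and suffix == "MB".  */

   A size that is not an exact multiple of 1 kB, e.g. 1000, is left
   unchanged and gets the empty suffix.  */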

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
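
/* Rough sketch of the ui-out nesting built by btrace_print_lines together
   with btrace_insn_history below, using the field names from this file
   (the exact MI rendering is not shown here):

     asm_insns = [
       src_and_asm_line = { <one source line>, line_asm_insn = [ <insns> ] },
       ...
     ]
 */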

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
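
/* Worked example for the computation above (hypothetical numbers): with
   FROM == 10 and SIZE == -3, the negative size selects the three
   instructions ending at FROM, i.e. begin == 8 and end == 10; with
   SIZE == 3 it selects begin == 10 and end == 12 instead.  Both bounds are
   inclusive here; record_btrace_insn_history_range widens END by one to
   make the iterator range half-open.  */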

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
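
/* Observable effect of the filter above (illustrative; "glob" is just a
   placeholder variable): while replaying with the default "read-only"
   policy, a command that writes inferior memory, e.g.

     (gdb) record goto 42
     (gdb) print glob = 1

   fails because the write is answered with TARGET_XFER_UNAVAILABLE, while
   reads from read-only sections (code, constants) are still forwarded.
   After "set record btrace replay-memory-access read-write" the write
   request is forwarded to the target beneath as well.  */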

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

 1974 /* GDB stores the current frame_id when stepping in order to detect steps
1975 into subroutines.
1976 Since frames are computed differently when we're replaying, we need to
1977 recompute those stored frames and fix them up so we can still detect
1978 subroutines after we started replaying. */
492d29ea 1979 TRY
52834460
MM
1980 {
1981 struct frame_info *frame;
1982 struct frame_id frame_id;
1983 int upd_step_frame_id, upd_step_stack_frame_id;
1984
1985 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1986 frame = get_thread_current_frame (tp);
52834460
MM
1987 frame_id = get_frame_id (frame);
1988
1989 /* Check if we need to update any stepping-related frame id's. */
1990 upd_step_frame_id = frame_id_eq (frame_id,
1991 tp->control.step_frame_id);
1992 upd_step_stack_frame_id = frame_id_eq (frame_id,
1993 tp->control.step_stack_frame_id);
1994
1995 /* We start replaying at the end of the branch trace. This corresponds
1996 to the current instruction. */
8d749320 1997 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1998 btrace_insn_end (replay, btinfo);
1999
31fd9caa
MM
2000 /* Skip gaps at the end of the trace. */
2001 while (btrace_insn_get (replay) == NULL)
2002 {
2003 unsigned int steps;
2004
2005 steps = btrace_insn_prev (replay, 1);
2006 if (steps == 0)
2007 error (_("No trace."));
2008 }
2009
52834460
MM
2010 /* We're not replaying, yet. */
2011 gdb_assert (btinfo->replay == NULL);
2012 btinfo->replay = replay;
2013
2014 /* Make sure we're not using any stale registers. */
2015 registers_changed_ptid (tp->ptid);
2016
2017 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2018 frame = get_thread_current_frame (tp);
52834460
MM
2019 frame_id = get_frame_id (frame);
2020
2021 /* Replace stepping related frames where necessary. */
2022 if (upd_step_frame_id)
2023 tp->control.step_frame_id = frame_id;
2024 if (upd_step_stack_frame_id)
2025 tp->control.step_stack_frame_id = frame_id;
2026 }
492d29ea 2027 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2028 {
2029 xfree (btinfo->replay);
2030 btinfo->replay = NULL;
2031
2032 registers_changed_ptid (tp->ptid);
2033
2034 throw_exception (except);
2035 }
492d29ea 2036 END_CATCH
52834460
MM
2037
2038 return replay;
2039}
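/* As an illustration: replaying typically starts either implicitly, from the
   first reverse execution command, or explicitly, from a "record goto"
   request.  Assuming a thread TP with a non-empty trace, the function above
   leaves TP with

     - btinfo->replay positioned at the end of the trace, i.e. at the
       current instruction (trailing gaps skipped),
     - TP's register cache invalidated, and
     - tp->control.step_frame_id / step_stack_frame_id recomputed via the
       btrace unwinder, so "step" and "next" still detect subroutine
       boundaries while replaying.  */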
2040
2041/* Stop replaying a thread. */
2042
2043static void
2044record_btrace_stop_replaying (struct thread_info *tp)
2045{
2046 struct btrace_thread_info *btinfo;
2047
2048 btinfo = &tp->btrace;
2049
2050 xfree (btinfo->replay);
2051 btinfo->replay = NULL;
2052
2053 /* Make sure we're not leaving any stale registers. */
2054 registers_changed_ptid (tp->ptid);
2055}
2056
e3cfc1c7
MM
2057/* Stop replaying TP if it is at the end of its execution history. */
2058
2059static void
2060record_btrace_stop_replaying_at_end (struct thread_info *tp)
2061{
2062 struct btrace_insn_iterator *replay, end;
2063 struct btrace_thread_info *btinfo;
2064
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2067
2068 if (replay == NULL)
2069 return;
2070
2071 btrace_insn_end (&end, btinfo);
2072
2073 if (btrace_insn_cmp (replay, &end) == 0)
2074 record_btrace_stop_replaying (tp);
2075}
2076
b2f4cfde
MM
2077/* The to_resume method of target record-btrace. */
2078
2079static void
2080record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2081 enum gdb_signal signal)
2082{
0ca912df 2083 struct thread_info *tp;
d2939ba2 2084 enum btrace_thread_flag flag, cflag;
52834460 2085
987e68b1
MM
2086 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2087 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2088 step ? "step" : "cont");
52834460 2089
0ca912df
MM
2090 /* Store the execution direction of the last resume.
2091
2092 If there is more than one to_resume call, we have to rely on infrun
2093 to not change the execution direction in-between. */
70ad5bff
MM
2094 record_btrace_resume_exec_dir = execution_direction;
2095
0ca912df 2096 /* As long as we're not replaying, just forward the request.
52834460 2097
0ca912df
MM
2098 For non-stop targets this means that no thread is replaying. In order to
2099 make progress, we may need to explicitly move replaying threads to the end
2100 of their execution history. */
a52eab48
MM
2101 if ((execution_direction != EXEC_REVERSE)
2102 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2103 {
e75fdfca 2104 ops = ops->beneath;
04c4fe8c
MM
2105 ops->to_resume (ops, ptid, step, signal);
2106 return;
b2f4cfde
MM
2107 }
2108
52834460 2109 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2110 if (execution_direction == EXEC_REVERSE)
2111 {
2112 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2113 cflag = BTHR_RCONT;
2114 }
52834460 2115 else
d2939ba2
MM
2116 {
2117 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2118 cflag = BTHR_CONT;
2119 }
52834460 2120
52834460 2121 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2122 record_btrace_wait below.
2123
2124 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2125 if (!target_is_non_stop_p ())
2126 {
2127 gdb_assert (ptid_match (inferior_ptid, ptid));
2128
2129 ALL_NON_EXITED_THREADS (tp)
2130 if (ptid_match (tp->ptid, ptid))
2131 {
2132 if (ptid_match (tp->ptid, inferior_ptid))
2133 record_btrace_resume_thread (tp, flag);
2134 else
2135 record_btrace_resume_thread (tp, cflag);
2136 }
2137 }
2138 else
2139 {
2140 ALL_NON_EXITED_THREADS (tp)
2141 if (ptid_match (tp->ptid, ptid))
2142 record_btrace_resume_thread (tp, flag);
2143 }
70ad5bff
MM
2144
2145 /* Async support. */
2146 if (target_can_async_p ())
2147 {
6a3753b3 2148 target_async (1);
70ad5bff
MM
2149 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2150 }
52834460
MM
2151}
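/* A hypothetical all-stop example: "stepi" on thread 1 with two other live
   threads resolves to record_btrace_resume_thread (t1, BTHR_STEP) for the
   selected thread and BTHR_CONT for the others; "reverse-stepi" yields
   BTHR_RSTEP and BTHR_RCONT instead.  No thread moves here; the actual
   stepping happens in record_btrace_wait below.  */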
2152
987e68b1
MM
2153/* Cancel resuming TP. */
2154
2155static void
2156record_btrace_cancel_resume (struct thread_info *tp)
2157{
2158 enum btrace_thread_flag flags;
2159
2160 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2161 if (flags == 0)
2162 return;
2163
43792cf0
PA
2164 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2165 print_thread_id (tp),
987e68b1
MM
2166 target_pid_to_str (tp->ptid), flags,
2167 btrace_thread_flag_to_str (flags));
2168
2169 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2170 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2171}
2172
2173/* Return a target_waitstatus indicating that we ran out of history. */
2174
2175static struct target_waitstatus
2176btrace_step_no_history (void)
2177{
2178 struct target_waitstatus status;
2179
2180 status.kind = TARGET_WAITKIND_NO_HISTORY;
2181
2182 return status;
2183}
2184
2185/* Return a target_waitstatus indicating that a step finished. */
2186
2187static struct target_waitstatus
2188btrace_step_stopped (void)
2189{
2190 struct target_waitstatus status;
2191
2192 status.kind = TARGET_WAITKIND_STOPPED;
2193 status.value.sig = GDB_SIGNAL_TRAP;
2194
2195 return status;
2196}
2197
6e4879f0
MM
2198/* Return a target_waitstatus indicating that a thread was stopped as
2199 requested. */
2200
2201static struct target_waitstatus
2202btrace_step_stopped_on_request (void)
2203{
2204 struct target_waitstatus status;
2205
2206 status.kind = TARGET_WAITKIND_STOPPED;
2207 status.value.sig = GDB_SIGNAL_0;
2208
2209 return status;
2210}
2211
d825d248
MM
2212/* Return a target_waitstatus indicating a spurious stop. */
2213
2214static struct target_waitstatus
2215btrace_step_spurious (void)
2216{
2217 struct target_waitstatus status;
2218
2219 status.kind = TARGET_WAITKIND_SPURIOUS;
2220
2221 return status;
2222}
2223
e3cfc1c7
MM
2224/* Return a target_waitstatus indicating that the thread was not resumed. */
2225
2226static struct target_waitstatus
2227btrace_step_no_resumed (void)
2228{
2229 struct target_waitstatus status;
2230
2231 status.kind = TARGET_WAITKIND_NO_RESUMED;
2232
2233 return status;
2234}
2235
2236/* Return a target_waitstatus indicating that we should wait again. */
2237
2238static struct target_waitstatus
2239btrace_step_again (void)
2240{
2241 struct target_waitstatus status;
2242
2243 status.kind = TARGET_WAITKIND_IGNORE;
2244
2245 return status;
2246}
2247
52834460
MM
2248/* Clear the record histories. */
2249
2250static void
2251record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2252{
2253 xfree (btinfo->insn_history);
2254 xfree (btinfo->call_history);
2255
2256 btinfo->insn_history = NULL;
2257 btinfo->call_history = NULL;
2258}
2259
3c615f99
MM
2260/* Check whether TP's current replay position is at a breakpoint. */
2261
2262static int
2263record_btrace_replay_at_breakpoint (struct thread_info *tp)
2264{
2265 struct btrace_insn_iterator *replay;
2266 struct btrace_thread_info *btinfo;
2267 const struct btrace_insn *insn;
2268 struct inferior *inf;
2269
2270 btinfo = &tp->btrace;
2271 replay = btinfo->replay;
2272
2273 if (replay == NULL)
2274 return 0;
2275
2276 insn = btrace_insn_get (replay);
2277 if (insn == NULL)
2278 return 0;
2279
2280 inf = find_inferior_ptid (tp->ptid);
2281 if (inf == NULL)
2282 return 0;
2283
2284 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2285 &btinfo->stop_reason);
2286}
2287
d825d248 2288/* Step one instruction in forward direction. */
52834460
MM
2289
2290static struct target_waitstatus
d825d248 2291record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2292{
2293 struct btrace_insn_iterator *replay, end;
2294 struct btrace_thread_info *btinfo;
52834460 2295
d825d248
MM
2296 btinfo = &tp->btrace;
2297 replay = btinfo->replay;
2298
2299 /* We're done if we're not replaying. */
2300 if (replay == NULL)
2301 return btrace_step_no_history ();
2302
011c71b6
MM
2303 /* Check if we're stepping a breakpoint. */
2304 if (record_btrace_replay_at_breakpoint (tp))
2305 return btrace_step_stopped ();
2306
d825d248
MM
2307 /* Skip gaps during replay. */
2308 do
2309 {
2310 unsigned int steps;
2311
e3cfc1c7
MM
2312 /* We will bail out here if we continue stepping after reaching the end
2313 of the execution history. */
d825d248
MM
2314 steps = btrace_insn_next (replay, 1);
2315 if (steps == 0)
e3cfc1c7 2316 return btrace_step_no_history ();
d825d248
MM
2317 }
2318 while (btrace_insn_get (replay) == NULL);
2319
2320 /* Determine the end of the instruction trace. */
2321 btrace_insn_end (&end, btinfo);
2322
e3cfc1c7
MM
2323 /* The execution trace contains (and ends with) the current instruction.
2324 This instruction has not been executed, yet, so the trace really ends
2325 one instruction earlier. */
d825d248 2326 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2327 return btrace_step_no_history ();
d825d248
MM
2328
2329 return btrace_step_spurious ();
2330}
2331
2332/* Step one instruction in backward direction. */
2333
2334static struct target_waitstatus
2335record_btrace_single_step_backward (struct thread_info *tp)
2336{
2337 struct btrace_insn_iterator *replay;
2338 struct btrace_thread_info *btinfo;
e59fa00f 2339
52834460
MM
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
d825d248
MM
2343 /* Start replaying if we're not already doing so. */
2344 if (replay == NULL)
2345 replay = record_btrace_start_replaying (tp);
2346
2347 /* If we can't step any further, we reached the end of the history.
2348 Skip gaps during replay. */
2349 do
2350 {
2351 unsigned int steps;
2352
2353 steps = btrace_insn_prev (replay, 1);
2354 if (steps == 0)
2355 return btrace_step_no_history ();
2356 }
2357 while (btrace_insn_get (replay) == NULL);
2358
011c71b6
MM
2359 /* Check if we're stepping a breakpoint.
2360
2361 For reverse-stepping, this check is after the step. There is logic in
2362 infrun.c that handles reverse-stepping separately. See, for example,
2363 proceed and adjust_pc_after_break.
2364
2365 This code assumes that for reverse-stepping, PC points to the last
2366 de-executed instruction, whereas for forward-stepping PC points to the
2367 next to-be-executed instruction. */
2368 if (record_btrace_replay_at_breakpoint (tp))
2369 return btrace_step_stopped ();
2370
d825d248
MM
2371 return btrace_step_spurious ();
2372}
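/* For both single-step helpers above, the returned wait status is
   interpreted by record_btrace_step_thread below roughly as follows:

     TARGET_WAITKIND_NO_HISTORY - we are not replaying (forward case) or
       ran out of trace in the requested direction;
     TARGET_WAITKIND_STOPPED    - the replayed PC is at a breakpoint;
     TARGET_WAITKIND_SPURIOUS   - one instruction was replayed and the
       caller may continue stepping.  */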
2373
2374/* Step a single thread. */
2375
2376static struct target_waitstatus
2377record_btrace_step_thread (struct thread_info *tp)
2378{
2379 struct btrace_thread_info *btinfo;
2380 struct target_waitstatus status;
2381 enum btrace_thread_flag flags;
2382
2383 btinfo = &tp->btrace;
2384
6e4879f0
MM
2385 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2386 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2387
43792cf0 2388 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2389 target_pid_to_str (tp->ptid), flags,
2390 btrace_thread_flag_to_str (flags));
52834460 2391
6e4879f0
MM
2392 /* We can't step without an execution history. */
2393 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2394 return btrace_step_no_history ();
2395
52834460
MM
2396 switch (flags)
2397 {
2398 default:
2399 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2400
6e4879f0
MM
2401 case BTHR_STOP:
2402 return btrace_step_stopped_on_request ();
2403
52834460 2404 case BTHR_STEP:
d825d248
MM
2405 status = record_btrace_single_step_forward (tp);
2406 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2407 break;
52834460
MM
2408
2409 return btrace_step_stopped ();
2410
2411 case BTHR_RSTEP:
d825d248
MM
2412 status = record_btrace_single_step_backward (tp);
2413 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2414 break;
52834460
MM
2415
2416 return btrace_step_stopped ();
2417
2418 case BTHR_CONT:
e3cfc1c7
MM
2419 status = record_btrace_single_step_forward (tp);
2420 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2421 break;
52834460 2422
e3cfc1c7
MM
2423 btinfo->flags |= flags;
2424 return btrace_step_again ();
52834460
MM
2425
2426 case BTHR_RCONT:
e3cfc1c7
MM
2427 status = record_btrace_single_step_backward (tp);
2428 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2429 break;
52834460 2430
e3cfc1c7
MM
2431 btinfo->flags |= flags;
2432 return btrace_step_again ();
2433 }
d825d248 2434
e3cfc1c7
MM
2435 /* We keep threads moving at the end of their execution history. The to_wait
2436 method will stop the thread for whom the event is reported. */
2437 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2438 btinfo->flags |= flags;
52834460 2439
e3cfc1c7 2440 return status;
b2f4cfde
MM
2441}
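/* A hypothetical "reverse-continue" walk-through: the thread carries
   BTHR_RCONT, so each call of record_btrace_step_thread replays one
   instruction backwards.  A TARGET_WAITKIND_SPURIOUS result re-arms the
   flag and returns btrace_step_again, letting the to_wait loop keep
   stepping until either a breakpoint is reached (TARGET_WAITKIND_STOPPED)
   or the beginning of the trace is hit (TARGET_WAITKIND_NO_HISTORY).  */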
2442
e3cfc1c7
MM
2443/* A vector of threads. */
2444
2445typedef struct thread_info * tp_t;
2446DEF_VEC_P (tp_t);
2447
a6b5be76
MM
2448/* Announce further events if necessary. */
2449
2450static void
2451record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2452 const VEC (tp_t) *no_history)
2453{
2454 int more_moving, more_no_history;
2455
2456 more_moving = !VEC_empty (tp_t, moving);
2457 more_no_history = !VEC_empty (tp_t, no_history);
2458
2459 if (!more_moving && !more_no_history)
2460 return;
2461
2462 if (more_moving)
2463 DEBUG ("movers pending");
2464
2465 if (more_no_history)
2466 DEBUG ("no-history pending");
2467
2468 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2469}
2470
b2f4cfde
MM
2471/* The to_wait method of target record-btrace. */
2472
2473static ptid_t
2474record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2475 struct target_waitstatus *status, int options)
2476{
e3cfc1c7
MM
2477 VEC (tp_t) *moving, *no_history;
2478 struct thread_info *tp, *eventing;
2479 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2480
2481 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2482
b2f4cfde 2483 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2484 if ((execution_direction != EXEC_REVERSE)
2485 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2486 {
e75fdfca
TT
2487 ops = ops->beneath;
2488 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2489 }
2490
e3cfc1c7
MM
2491 moving = NULL;
2492 no_history = NULL;
2493
2494 make_cleanup (VEC_cleanup (tp_t), &moving);
2495 make_cleanup (VEC_cleanup (tp_t), &no_history);
2496
2497 /* Keep a work list of moving threads. */
2498 ALL_NON_EXITED_THREADS (tp)
2499 if (ptid_match (tp->ptid, ptid)
2500 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2501 VEC_safe_push (tp_t, moving, tp);
2502
2503 if (VEC_empty (tp_t, moving))
52834460 2504 {
e3cfc1c7 2505 *status = btrace_step_no_resumed ();
52834460 2506
e3cfc1c7
MM
2507 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2508 target_waitstatus_to_string (status));
2509
2510 do_cleanups (cleanups);
2511 return null_ptid;
52834460
MM
2512 }
2513
e3cfc1c7
MM
2514 /* Step moving threads one by one, one step each, until either one thread
2515 reports an event or we run out of threads to step.
2516
2517 When stepping more than one thread, chances are that some threads reach
2518 the end of their execution history earlier than others. If we reported
2519 this immediately, all-stop on top of non-stop would stop all threads and
2520 resume the same threads next time. And we would report the same thread
2521 having reached the end of its execution history again.
2522
2523 In the worst case, this would starve the other threads. But even if other
2524 threads would be allowed to make progress, this would result in far too
2525 many intermediate stops.
2526
2527 We therefore delay the reporting of "no execution history" until we have
2528 nothing else to report. By this time, all threads should have moved to
2529 either the beginning or the end of their execution history. There will
2530 be a single user-visible stop. */
2531 eventing = NULL;
2532 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2533 {
2534 unsigned int ix;
2535
2536 ix = 0;
2537 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2538 {
2539 *status = record_btrace_step_thread (tp);
2540
2541 switch (status->kind)
2542 {
2543 case TARGET_WAITKIND_IGNORE:
2544 ix++;
2545 break;
2546
2547 case TARGET_WAITKIND_NO_HISTORY:
2548 VEC_safe_push (tp_t, no_history,
2549 VEC_ordered_remove (tp_t, moving, ix));
2550 break;
2551
2552 default:
2553 eventing = VEC_unordered_remove (tp_t, moving, ix);
2554 break;
2555 }
2556 }
2557 }
2558
2559 if (eventing == NULL)
2560 {
2561 /* We started with at least one moving thread. This thread must have
2562 either stopped or reached the end of its execution history.
2563
2564 In the former case, EVENTING must not be NULL.
2565 In the latter case, NO_HISTORY must not be empty. */
2566 gdb_assert (!VEC_empty (tp_t, no_history));
2567
2568 /* We kept threads moving at the end of their execution history. Stop
2569 EVENTING now that we are going to report its stop. */
2570 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2571 eventing->btrace.flags &= ~BTHR_MOVE;
2572
2573 *status = btrace_step_no_history ();
2574 }
2575
2576 gdb_assert (eventing != NULL);
2577
2578 /* We kept threads replaying at the end of their execution history. Stop
2579 replaying EVENTING now that we are going to report its stop. */
2580 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2581
2582 /* Stop all other threads. */
5953356c 2583 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2584 ALL_NON_EXITED_THREADS (tp)
2585 record_btrace_cancel_resume (tp);
52834460 2586
a6b5be76
MM
2587 /* In async mode, we need to announce further events. */
2588 if (target_is_async_p ())
2589 record_btrace_maybe_mark_async_event (moving, no_history);
2590
52834460 2591 /* Start record histories anew from the current position. */
e3cfc1c7 2592 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2593
2594 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2595 registers_changed_ptid (eventing->ptid);
2596
43792cf0
PA
2597 DEBUG ("wait ended by thread %s (%s): %s",
2598 print_thread_id (eventing),
e3cfc1c7
MM
2599 target_pid_to_str (eventing->ptid),
2600 target_waitstatus_to_string (status));
52834460 2601
e3cfc1c7
MM
2602 do_cleanups (cleanups);
2603 return eventing->ptid;
52834460
MM
2604}
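/* A hypothetical scenario for the deferred reporting above: two replaying
   threads are stepped together; one reaches a breakpoint and becomes
   EVENTING, the other merely runs out of execution history and is parked
   on NO_HISTORY.  The breakpoint stop is reported now; the exhausted
   thread's TARGET_WAITKIND_NO_HISTORY stop is only reported by a later
   call (announced via the async event handler in async mode), once no
   thread has anything else to report.  */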
2605
6e4879f0
MM
2606/* The to_stop method of target record-btrace. */
2607
2608static void
2609record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2610{
2611 DEBUG ("stop %s", target_pid_to_str (ptid));
2612
2613 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2614 if ((execution_direction != EXEC_REVERSE)
2615 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2616 {
2617 ops = ops->beneath;
2618 ops->to_stop (ops, ptid);
2619 }
2620 else
2621 {
2622 struct thread_info *tp;
2623
2624 ALL_NON_EXITED_THREADS (tp)
2625 if (ptid_match (tp->ptid, ptid))
2626 {
2627 tp->btrace.flags &= ~BTHR_MOVE;
2628 tp->btrace.flags |= BTHR_STOP;
2629 }
2630 }
2631 }
2632
52834460
MM
2633/* The to_can_execute_reverse method of target record-btrace. */
2634
2635static int
19db3e69 2636record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2637{
2638 return 1;
2639}
2640
9e8915c6 2641/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2642
9e8915c6
PA
2643static int
2644record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2645{
a52eab48 2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2647 {
2648 struct thread_info *tp = inferior_thread ();
2649
2650 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2651 }
2652
2653 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2654}
2655
2656/* The to_supports_stopped_by_sw_breakpoint method of target
2657 record-btrace. */
2658
2659static int
2660record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2661{
a52eab48 2662 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2663 return 1;
2664
2665 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2666}
2667
2668/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2669
2670static int
2671record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2672{
a52eab48 2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2674 {
2675 struct thread_info *tp = inferior_thread ();
2676
2677 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2678 }
2679
2680 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2681}
2682
2683/* The to_supports_stopped_by_hw_breakpoint method of target
2684 record-btrace. */
2685
2686static int
2687record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2688{
a52eab48 2689 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2690 return 1;
52834460 2691
9e8915c6 2692 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2693}
2694
e8032dde 2695/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2696
2697static void
e8032dde 2698record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2699{
e8032dde 2700 /* We don't add or remove threads during replay. */
a52eab48 2701 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2702 return;
2703
2704 /* Forward the request. */
e75fdfca 2705 ops = ops->beneath;
e8032dde 2706 ops->to_update_thread_list (ops);
e2887aa3
MM
2707}
2708
2709/* The to_thread_alive method of target record-btrace. */
2710
2711static int
2712record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2713{
2714 /* We don't add or remove threads during replay. */
a52eab48 2715 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2716 return find_thread_ptid (ptid) != NULL;
2717
2718 /* Forward the request. */
e75fdfca
TT
2719 ops = ops->beneath;
2720 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2721}
2722
066ce621
MM
2723/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2724 is stopped. */
2725
2726static void
2727record_btrace_set_replay (struct thread_info *tp,
2728 const struct btrace_insn_iterator *it)
2729{
2730 struct btrace_thread_info *btinfo;
2731
2732 btinfo = &tp->btrace;
2733
2734 if (it == NULL || it->function == NULL)
52834460 2735 record_btrace_stop_replaying (tp);
066ce621
MM
2736 else
2737 {
2738 if (btinfo->replay == NULL)
52834460 2739 record_btrace_start_replaying (tp);
066ce621
MM
2740 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2741 return;
2742
2743 *btinfo->replay = *it;
52834460 2744 registers_changed_ptid (tp->ptid);
066ce621
MM
2745 }
2746
52834460
MM
2747 /* Start anew from the new replay position. */
2748 record_btrace_clear_histories (btinfo);
485668e5
MM
2749
2750 stop_pc = regcache_read_pc (get_current_regcache ());
2751 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2752}
2753
2754/* The to_goto_record_begin method of target record-btrace. */
2755
2756static void
08475817 2757record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2758{
2759 struct thread_info *tp;
2760 struct btrace_insn_iterator begin;
2761
2762 tp = require_btrace_thread ();
2763
2764 btrace_insn_begin (&begin, &tp->btrace);
2765 record_btrace_set_replay (tp, &begin);
066ce621
MM
2766}
2767
2768/* The to_goto_record_end method of target record-btrace. */
2769
2770static void
307a1b91 2771record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2772{
2773 struct thread_info *tp;
2774
2775 tp = require_btrace_thread ();
2776
2777 record_btrace_set_replay (tp, NULL);
066ce621
MM
2778}
2779
2780/* The to_goto_record method of target record-btrace. */
2781
2782static void
606183ac 2783record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2784{
2785 struct thread_info *tp;
2786 struct btrace_insn_iterator it;
2787 unsigned int number;
2788 int found;
2789
2790 number = insn;
2791
2792 /* Check for wrap-arounds. */
2793 if (number != insn)
2794 error (_("Instruction number out of range."));
2795
2796 tp = require_btrace_thread ();
2797
2798 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2799 if (found == 0)
2800 error (_("No such instruction."));
2801
2802 record_btrace_set_replay (tp, &it);
066ce621
MM
2803}
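/* A hypothetical session using the three methods above, which back the
   "record goto" command family:

     (gdb) record goto begin     # record_btrace_goto_begin
     (gdb) record goto 42        # record_btrace_goto, instruction 42
     (gdb) record goto end       # record_btrace_goto_end, stops replaying

   Instruction numbers are those shown by "record instruction-history".  */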
2804
797094dd
MM
2805/* The to_record_stop_replaying method of target record-btrace. */
2806
2807static void
2808record_btrace_stop_replaying_all (struct target_ops *self)
2809{
2810 struct thread_info *tp;
2811
2812 ALL_NON_EXITED_THREADS (tp)
2813 record_btrace_stop_replaying (tp);
2814}
2815
70ad5bff
MM
2816/* The to_execution_direction target method. */
2817
2818static enum exec_direction_kind
2819record_btrace_execution_direction (struct target_ops *self)
2820{
2821 return record_btrace_resume_exec_dir;
2822}
2823
aef92902
MM
2824/* The to_prepare_to_generate_core target method. */
2825
2826static void
2827record_btrace_prepare_to_generate_core (struct target_ops *self)
2828{
2829 record_btrace_generating_corefile = 1;
2830}
2831
2832/* The to_done_generating_core target method. */
2833
2834static void
2835record_btrace_done_generating_core (struct target_ops *self)
2836{
2837 record_btrace_generating_corefile = 0;
2838}
2839
afedecd3
MM
2840/* Initialize the record-btrace target ops. */
2841
2842static void
2843init_record_btrace_ops (void)
2844{
2845 struct target_ops *ops;
2846
2847 ops = &record_btrace_ops;
2848 ops->to_shortname = "record-btrace";
2849 ops->to_longname = "Branch tracing target";
2850 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2851 ops->to_open = record_btrace_open;
2852 ops->to_close = record_btrace_close;
b7d2e916 2853 ops->to_async = record_btrace_async;
afedecd3 2854 ops->to_detach = record_detach;
c0272db5 2855 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2856 ops->to_mourn_inferior = record_mourn_inferior;
2857 ops->to_kill = record_kill;
afedecd3
MM
2858 ops->to_stop_recording = record_btrace_stop_recording;
2859 ops->to_info_record = record_btrace_info;
2860 ops->to_insn_history = record_btrace_insn_history;
2861 ops->to_insn_history_from = record_btrace_insn_history_from;
2862 ops->to_insn_history_range = record_btrace_insn_history_range;
2863 ops->to_call_history = record_btrace_call_history;
2864 ops->to_call_history_from = record_btrace_call_history_from;
2865 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2866 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2867 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2868 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2869 ops->to_xfer_partial = record_btrace_xfer_partial;
2870 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2871 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2872 ops->to_fetch_registers = record_btrace_fetch_registers;
2873 ops->to_store_registers = record_btrace_store_registers;
2874 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2875 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2876 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
2877 ops->to_resume = record_btrace_resume;
2878 ops->to_wait = record_btrace_wait;
6e4879f0 2879 ops->to_stop = record_btrace_stop;
e8032dde 2880 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2881 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2882 ops->to_goto_record_begin = record_btrace_goto_begin;
2883 ops->to_goto_record_end = record_btrace_goto_end;
2884 ops->to_goto_record = record_btrace_goto;
52834460 2885 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2886 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2887 ops->to_supports_stopped_by_sw_breakpoint
2888 = record_btrace_supports_stopped_by_sw_breakpoint;
2889 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2890 ops->to_supports_stopped_by_hw_breakpoint
2891 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2892 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2893 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2894 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2895 ops->to_stratum = record_stratum;
2896 ops->to_magic = OPS_MAGIC;
2897}
2898
f4abbc16
MM
2899/* Start recording in BTS format. */
2900
2901static void
2902cmd_record_btrace_bts_start (char *args, int from_tty)
2903{
f4abbc16
MM
2904 if (args != NULL && *args != 0)
2905 error (_("Invalid argument."));
2906
2907 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2908
492d29ea
PA
2909 TRY
2910 {
2911 execute_command ("target record-btrace", from_tty);
2912 }
2913 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2914 {
2915 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2916 throw_exception (exception);
2917 }
492d29ea 2918 END_CATCH
f4abbc16
MM
2919}
2920
bc504a31 2921/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2922
2923static void
b20a6524 2924cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2925{
2926 if (args != NULL && *args != 0)
2927 error (_("Invalid argument."));
2928
b20a6524 2929 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2930
492d29ea
PA
2931 TRY
2932 {
2933 execute_command ("target record-btrace", from_tty);
2934 }
2935 CATCH (exception, RETURN_MASK_ALL)
2936 {
2937 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2938 throw_exception (exception);
2939 }
2940 END_CATCH
afedecd3
MM
2941}
2942
b20a6524
MM
2943/* Alias for "target record". */
2944
2945static void
2946cmd_record_btrace_start (char *args, int from_tty)
2947{
2948 if (args != NULL && *args != 0)
2949 error (_("Invalid argument."));
2950
2951 record_btrace_conf.format = BTRACE_FORMAT_PT;
2952
2953 TRY
2954 {
2955 execute_command ("target record-btrace", from_tty);
2956 }
2957 CATCH (exception, RETURN_MASK_ALL)
2958 {
2959 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2960
2961 TRY
2962 {
2963 execute_command ("target record-btrace", from_tty);
2964 }
2965 CATCH (exception, RETURN_MASK_ALL)
2966 {
2967 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2968 throw_exception (exception);
2969 }
2970 END_CATCH
2971 }
2972 END_CATCH
2973}
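/* Note: the command above implements plain "record btrace" (and the
   "record b" alias registered below): it first tries the Intel Processor
   Trace format and silently falls back to BTS if that fails; only when
   both formats fail is the exception propagated to the user.  */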
2974
67b5c0c1
MM
2975/* The "set record btrace" command. */
2976
2977static void
2978cmd_set_record_btrace (char *args, int from_tty)
2979{
2980 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2981}
2982
2983/* The "show record btrace" command. */
2984
2985static void
2986cmd_show_record_btrace (char *args, int from_tty)
2987{
2988 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2989}
2990
2991/* The "show record btrace replay-memory-access" command. */
2992
2993static void
2994cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2995 struct cmd_list_element *c, const char *value)
2996{
2997 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2998 replay_memory_access);
2999}
3000
d33501a5
MM
3001/* The "set record btrace bts" command. */
3002
3003static void
3004cmd_set_record_btrace_bts (char *args, int from_tty)
3005{
3006 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3007 "by an appropriate subcommand.\n"));
d33501a5
MM
3008 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3009 all_commands, gdb_stdout);
3010}
3011
3012/* The "show record btrace bts" command. */
3013
3014static void
3015cmd_show_record_btrace_bts (char *args, int from_tty)
3016{
3017 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3018}
3019
b20a6524
MM
3020/* The "set record btrace pt" command. */
3021
3022static void
3023cmd_set_record_btrace_pt (char *args, int from_tty)
3024{
3025 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3026 "by an appropriate subcommand.\n"));
3027 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3028 all_commands, gdb_stdout);
3029}
3030
3031/* The "show record btrace pt" command. */
3032
3033static void
3034cmd_show_record_btrace_pt (char *args, int from_tty)
3035{
3036 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3037}
3038
3039/* The "record bts buffer-size" show value function. */
3040
3041static void
3042show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3043 struct cmd_list_element *c,
3044 const char *value)
3045{
3046 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3047 value);
3048}
3049
3050/* The "record pt buffer-size" show value function. */
3051
3052static void
3053show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3054 struct cmd_list_element *c,
3055 const char *value)
3056{
3057 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3058 value);
3059}
3060
afedecd3
MM
3061void _initialize_record_btrace (void);
3062
3063/* Initialize btrace commands. */
3064
3065void
3066_initialize_record_btrace (void)
3067{
f4abbc16
MM
3068 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3069 _("Start branch trace recording."), &record_btrace_cmdlist,
3070 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3071 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3072
f4abbc16
MM
3073 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3074 _("\
3075Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3076The processor stores a from/to record for each branch into a cyclic buffer.\n\
3077This format may not be available on all processors."),
3078 &record_btrace_cmdlist);
3079 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3080
b20a6524
MM
3081 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3082 _("\
bc504a31 3083Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3084This format may not be available on all processors."),
3085 &record_btrace_cmdlist);
3086 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3087
67b5c0c1
MM
3088 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3089 _("Set record options"), &set_record_btrace_cmdlist,
3090 "set record btrace ", 0, &set_record_cmdlist);
3091
3092 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3093 _("Show record options"), &show_record_btrace_cmdlist,
3094 "show record btrace ", 0, &show_record_cmdlist);
3095
3096 add_setshow_enum_cmd ("replay-memory-access", no_class,
3097 replay_memory_access_types, &replay_memory_access, _("\
3098Set what memory accesses are allowed during replay."), _("\
3099Show what memory accesses are allowed during replay."),
3100 _("Default is READ-ONLY.\n\n\
3101The btrace record target does not trace data.\n\
3102The memory therefore corresponds to the live target and not \
3103to the current replay position.\n\n\
3104When READ-ONLY, allow accesses to read-only memory during replay.\n\
3105When READ-WRITE, allow accesses to read-only and read-write memory during \
3106replay."),
3107 NULL, cmd_show_replay_memory_access,
3108 &set_record_btrace_cmdlist,
3109 &show_record_btrace_cmdlist);
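  /* A hypothetical use of the option registered above:

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access

     This widens the memory accesses permitted while replaying; the
     default remains read-only.  */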
3110
d33501a5
MM
3111 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3112 _("Set record btrace bts options"),
3113 &set_record_btrace_bts_cmdlist,
3114 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3115
3116 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3117 _("Show record btrace bts options"),
3118 &show_record_btrace_bts_cmdlist,
3119 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3120
3121 add_setshow_uinteger_cmd ("buffer-size", no_class,
3122 &record_btrace_conf.bts.size,
3123 _("Set the record/replay bts buffer size."),
3124 _("Show the record/replay bts buffer size."), _("\
3125When starting recording request a trace buffer of this size. \
3126The actual buffer size may differ from the requested size. \
3127Use \"info record\" to see the actual buffer size.\n\n\
3128Bigger buffers allow longer recording but also take more time to process \
3129the recorded execution trace.\n\n\
b20a6524
MM
3130The trace buffer size may not be changed while recording."), NULL,
3131 show_record_bts_buffer_size_value,
d33501a5
MM
3132 &set_record_btrace_bts_cmdlist,
3133 &show_record_btrace_bts_cmdlist);
3134
b20a6524
MM
3135 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3136 _("Set record btrace pt options"),
3137 &set_record_btrace_pt_cmdlist,
3138 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3139
3140 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3141 _("Show record btrace pt options"),
3142 &show_record_btrace_pt_cmdlist,
3143 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3144
3145 add_setshow_uinteger_cmd ("buffer-size", no_class,
3146 &record_btrace_conf.pt.size,
3147 _("Set the record/replay pt buffer size."),
3148 _("Show the record/replay pt buffer size."), _("\
3149Bigger buffers allow longer recording but also take more time to process \
3150the recorded execution.\n\
3151The actual buffer size may differ from the requested size. Use \"info record\" \
3152to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3153 &set_record_btrace_pt_cmdlist,
3154 &show_record_btrace_pt_cmdlist);
3155
afedecd3
MM
3156 init_record_btrace_ops ();
3157 add_target (&record_btrace_ops);
0b722aec
MM
3158
3159 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3160 xcalloc, xfree);
d33501a5
MM
3161
3162 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3163 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3164}