Reuse buffers across gdb_pretty_print_insn calls
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
148static void
149record_btrace_enable_warn (struct thread_info *tp)
150{
492d29ea
PA
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
afedecd3
MM
160}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
174static void
175record_btrace_auto_enable (void)
176{
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
198/* The record-btrace async event handler function. */
199
200static void
201record_btrace_handle_async_inferior_event (gdb_client_data data)
202{
203 inferior_event_handler (INF_REG_EVENT, NULL);
204}
205
c0272db5
TW
206/* See record-btrace.h. */
207
208void
209record_btrace_push_target (void)
210{
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224}
225
afedecd3
MM
226/* The to_open method of target record-btrace. */
227
228static void
014f9477 229record_btrace_open (const char *args, int from_tty)
afedecd3
MM
230{
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
8213266a 236 record_preopen ();
afedecd3
MM
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
afedecd3
MM
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 244 ALL_NON_EXITED_THREADS (tp)
5d5658a1 245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 246 {
f4abbc16 247 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
c0272db5 252 record_btrace_push_target ();
afedecd3
MM
253
254 discard_cleanups (disable_chain);
255}
256
257/* The to_stop_recording method of target record-btrace. */
258
259static void
c6cd7c02 260record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
261{
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
034f788c 268 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
275static void
276record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278{
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
290static void
de90e03d 291record_btrace_close (struct target_ops *self)
afedecd3 292{
568e808b
MM
293 struct thread_info *tp;
294
70ad5bff
MM
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
99c819ee
MM
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
568e808b
MM
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
034f788c 304 ALL_NON_EXITED_THREADS (tp)
568e808b 305 btrace_teardown (tp);
afedecd3
MM
306}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
d33501a5
MM
/* Scale *SIZE down to the largest unit that divides it evenly and return
   the matching human-readable suffix ("GB", "MB", "kB", or "").  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }

  if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }

  if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
348
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
afedecd3
MM
406/* The to_info_record method of target record-btrace. */
407
408static void
630d6a4a 409record_btrace_info (struct target_ops *self)
afedecd3
MM
410{
411 struct btrace_thread_info *btinfo;
f4abbc16 412 const struct btrace_config *conf;
afedecd3 413 struct thread_info *tp;
31fd9caa 414 unsigned int insns, calls, gaps;
afedecd3
MM
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
cd4007e4
MM
422 validate_registers_access ();
423
f4abbc16
MM
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
d33501a5 428 record_btrace_print_conf (conf);
f4abbc16 429
afedecd3
MM
430 btrace_fetch (tp);
431
23a7fe75
MM
432 insns = 0;
433 calls = 0;
31fd9caa 434 gaps = 0;
23a7fe75 435
6e07b1d2 436 if (!btrace_is_empty (tp))
23a7fe75
MM
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
5de9129b 443 calls = btrace_call_number (&call);
23a7fe75
MM
444
445 btrace_insn_end (&insn, btinfo);
31fd9caa 446
5de9129b 447 insns = btrace_insn_number (&insn);
31fd9caa
MM
448 if (insns != 0)
449 {
450 /* The last instruction does not really belong to the trace. */
451 insns -= 1;
452 }
453 else
454 {
455 unsigned int steps;
456
457 /* Skip gaps at the end. */
458 do
459 {
460 steps = btrace_insn_prev (&insn, 1);
461 if (steps == 0)
462 break;
463
464 insns = btrace_insn_number (&insn);
465 }
466 while (insns == 0);
467 }
468
469 gaps = btinfo->ngaps;
23a7fe75 470 }
afedecd3 471
31fd9caa 472 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
473 "for thread %s (%s).\n"), insns, calls, gaps,
474 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
475
476 if (btrace_is_replaying (tp))
477 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
478 btrace_insn_number (btinfo->replay));
afedecd3
MM
479}
480
31fd9caa
MM
481/* Print a decode error. */
482
483static void
484btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
485 enum btrace_format format)
486{
487 const char *errstr;
488 int is_error;
489
490 errstr = _("unknown");
491 is_error = 1;
492
493 switch (format)
494 {
495 default:
496 break;
497
498 case BTRACE_FORMAT_BTS:
499 switch (errcode)
500 {
501 default:
502 break;
503
504 case BDE_BTS_OVERFLOW:
505 errstr = _("instruction overflow");
506 break;
507
508 case BDE_BTS_INSN_SIZE:
509 errstr = _("unknown instruction");
510 break;
511 }
512 break;
b20a6524
MM
513
514#if defined (HAVE_LIBIPT)
515 case BTRACE_FORMAT_PT:
516 switch (errcode)
517 {
518 case BDE_PT_USER_QUIT:
519 is_error = 0;
520 errstr = _("trace decode cancelled");
521 break;
522
523 case BDE_PT_DISABLED:
524 is_error = 0;
525 errstr = _("disabled");
526 break;
527
528 case BDE_PT_OVERFLOW:
529 is_error = 0;
530 errstr = _("overflow");
531 break;
532
533 default:
534 if (errcode < 0)
535 errstr = pt_errstr (pt_errcode (errcode));
536 break;
537 }
538 break;
539#endif /* defined (HAVE_LIBIPT) */
31fd9caa
MM
540 }
541
112e8700 542 uiout->text (_("["));
31fd9caa
MM
543 if (is_error)
544 {
112e8700
SM
545 uiout->text (_("decode error ("));
546 uiout->field_int ("errcode", errcode);
547 uiout->text (_("): "));
31fd9caa 548 }
112e8700
SM
549 uiout->text (errstr);
550 uiout->text (_("]\n"));
31fd9caa
MM
551}
552
afedecd3
MM
553/* Print an unsigned int. */
554
555static void
556ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
557{
112e8700 558 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
559}
560
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range for SYMTAB covering [BEGIN, END).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range result;

  result.symtab = symtab;
  result.begin = begin;
  result.end = end;

  return result;
}

/* Grow RANGE to cover LINE and return the grown range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    /* NOTE(review): END is documented as exclusive, so LINE itself ends up
       just outside the range here — presumably intentional upstream;
       confirm against btrace_print_lines.  */
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS (same symtab, enclosing line span),
   zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
626
627/* Find the line range associated with PC. */
628
629static struct btrace_line_range
630btrace_find_line_range (CORE_ADDR pc)
631{
632 struct btrace_line_range range;
633 struct linetable_entry *lines;
634 struct linetable *ltable;
635 struct symtab *symtab;
636 int nlines, i;
637
638 symtab = find_pc_line_symtab (pc);
639 if (symtab == NULL)
640 return btrace_mk_line_range (NULL, 0, 0);
641
642 ltable = SYMTAB_LINETABLE (symtab);
643 if (ltable == NULL)
644 return btrace_mk_line_range (symtab, 0, 0);
645
646 nlines = ltable->nitems;
647 lines = ltable->item;
648 if (nlines <= 0)
649 return btrace_mk_line_range (symtab, 0, 0);
650
651 range = btrace_mk_line_range (symtab, 0, 0);
652 for (i = 0; i < nlines - 1; i++)
653 {
654 if ((lines[i].pc == pc) && (lines[i].line != 0))
655 range = btrace_line_range_add (range, lines[i].line);
656 }
657
658 return range;
659}
660
661/* Print source lines in LINES to UIOUT.
662
663 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
664 instructions corresponding to that source line. When printing a new source
665 line, we do the cleanups for the open chain and open a new cleanup chain for
666 the new source line. If the source line range in LINES is not empty, this
667 function will leave the cleanup chain for the last printed source line open
668 so instructions can be added to it. */
669
670static void
671btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
672 struct cleanup **ui_item_chain, int flags)
673{
8d297bbf 674 print_source_lines_flags psl_flags;
f94cc897
MM
675 int line;
676
677 psl_flags = 0;
678 if (flags & DISASSEMBLY_FILENAME)
679 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
680
681 for (line = lines.begin; line < lines.end; ++line)
682 {
683 if (*ui_item_chain != NULL)
684 do_cleanups (*ui_item_chain);
685
686 *ui_item_chain
687 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
688
689 print_source_lines (lines.symtab, line, line + 1, psl_flags);
690
691 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
692 }
693}
694
afedecd3
MM
695/* Disassemble a section of the recorded instruction trace. */
696
697static void
23a7fe75 698btrace_insn_history (struct ui_out *uiout,
31fd9caa 699 const struct btrace_thread_info *btinfo,
23a7fe75
MM
700 const struct btrace_insn_iterator *begin,
701 const struct btrace_insn_iterator *end, int flags)
afedecd3 702{
f94cc897 703 struct cleanup *cleanups, *ui_item_chain;
afedecd3 704 struct gdbarch *gdbarch;
23a7fe75 705 struct btrace_insn_iterator it;
f94cc897 706 struct btrace_line_range last_lines;
afedecd3 707
23a7fe75
MM
708 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
709 btrace_insn_number (end));
afedecd3 710
f94cc897
MM
711 flags |= DISASSEMBLY_SPECULATIVE;
712
afedecd3 713 gdbarch = target_gdbarch ();
f94cc897
MM
714 last_lines = btrace_mk_line_range (NULL, 0, 0);
715
187808b0 716 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
f94cc897
MM
717
718 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
719 instructions corresponding to that line. */
720 ui_item_chain = NULL;
afedecd3 721
8b172ce7
PA
722 gdb_pretty_print_disassembler disasm (gdbarch);
723
23a7fe75 724 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 725 {
23a7fe75
MM
726 const struct btrace_insn *insn;
727
728 insn = btrace_insn_get (&it);
729
31fd9caa
MM
730 /* A NULL instruction indicates a gap in the trace. */
731 if (insn == NULL)
732 {
733 const struct btrace_config *conf;
734
735 conf = btrace_conf (btinfo);
afedecd3 736
31fd9caa
MM
737 /* We have trace so we must have a configuration. */
738 gdb_assert (conf != NULL);
739
740 btrace_ui_out_decode_error (uiout, it.function->errcode,
741 conf->format);
742 }
743 else
744 {
f94cc897 745 struct disasm_insn dinsn;
da8c46d2 746
f94cc897 747 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 748 {
f94cc897
MM
749 struct btrace_line_range lines;
750
751 lines = btrace_find_line_range (insn->pc);
752 if (!btrace_line_range_is_empty (lines)
753 && !btrace_line_range_contains_range (last_lines, lines))
754 {
755 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
756 last_lines = lines;
757 }
758 else if (ui_item_chain == NULL)
759 {
760 ui_item_chain
761 = make_cleanup_ui_out_tuple_begin_end (uiout,
762 "src_and_asm_line");
763 /* No source information. */
764 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
765 }
766
767 gdb_assert (ui_item_chain != NULL);
da8c46d2 768 }
da8c46d2 769
f94cc897
MM
770 memset (&dinsn, 0, sizeof (dinsn));
771 dinsn.number = btrace_insn_number (&it);
772 dinsn.addr = insn->pc;
31fd9caa 773
da8c46d2 774 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 775 dinsn.is_speculative = 1;
da8c46d2 776
8b172ce7 777 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 778 }
afedecd3 779 }
f94cc897
MM
780
781 do_cleanups (cleanups);
afedecd3
MM
782}
783
784/* The to_insn_history method of target record-btrace. */
785
786static void
7a6c5609 787record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
788{
789 struct btrace_thread_info *btinfo;
23a7fe75
MM
790 struct btrace_insn_history *history;
791 struct btrace_insn_iterator begin, end;
afedecd3
MM
792 struct cleanup *uiout_cleanup;
793 struct ui_out *uiout;
23a7fe75 794 unsigned int context, covered;
afedecd3
MM
795
796 uiout = current_uiout;
797 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
798 "insn history");
afedecd3 799 context = abs (size);
afedecd3
MM
800 if (context == 0)
801 error (_("Bad record instruction-history-size."));
802
23a7fe75
MM
803 btinfo = require_btrace ();
804 history = btinfo->insn_history;
805 if (history == NULL)
afedecd3 806 {
07bbe694 807 struct btrace_insn_iterator *replay;
afedecd3 808
23a7fe75 809 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 810
07bbe694
MM
811 /* If we're replaying, we start at the replay position. Otherwise, we
812 start at the tail of the trace. */
813 replay = btinfo->replay;
814 if (replay != NULL)
815 begin = *replay;
816 else
817 btrace_insn_end (&begin, btinfo);
818
819 /* We start from here and expand in the requested direction. Then we
820 expand in the other direction, as well, to fill up any remaining
821 context. */
822 end = begin;
823 if (size < 0)
824 {
825 /* We want the current position covered, as well. */
826 covered = btrace_insn_next (&end, 1);
827 covered += btrace_insn_prev (&begin, context - covered);
828 covered += btrace_insn_next (&end, context - covered);
829 }
830 else
831 {
832 covered = btrace_insn_next (&end, context);
833 covered += btrace_insn_prev (&begin, context - covered);
834 }
afedecd3
MM
835 }
836 else
837 {
23a7fe75
MM
838 begin = history->begin;
839 end = history->end;
afedecd3 840
23a7fe75
MM
841 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
842 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 843
23a7fe75
MM
844 if (size < 0)
845 {
846 end = begin;
847 covered = btrace_insn_prev (&begin, context);
848 }
849 else
850 {
851 begin = end;
852 covered = btrace_insn_next (&end, context);
853 }
afedecd3
MM
854 }
855
23a7fe75 856 if (covered > 0)
31fd9caa 857 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
858 else
859 {
860 if (size < 0)
861 printf_unfiltered (_("At the start of the branch trace record.\n"));
862 else
863 printf_unfiltered (_("At the end of the branch trace record.\n"));
864 }
afedecd3 865
23a7fe75 866 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
867 do_cleanups (uiout_cleanup);
868}
869
870/* The to_insn_history_range method of target record-btrace. */
871
872static void
4e99c6b7
TT
873record_btrace_insn_history_range (struct target_ops *self,
874 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
875{
876 struct btrace_thread_info *btinfo;
23a7fe75
MM
877 struct btrace_insn_history *history;
878 struct btrace_insn_iterator begin, end;
afedecd3
MM
879 struct cleanup *uiout_cleanup;
880 struct ui_out *uiout;
23a7fe75
MM
881 unsigned int low, high;
882 int found;
afedecd3
MM
883
884 uiout = current_uiout;
885 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
886 "insn history");
23a7fe75
MM
887 low = from;
888 high = to;
afedecd3 889
23a7fe75 890 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
891
892 /* Check for wrap-arounds. */
23a7fe75 893 if (low != from || high != to)
afedecd3
MM
894 error (_("Bad range."));
895
0688d04e 896 if (high < low)
afedecd3
MM
897 error (_("Bad range."));
898
23a7fe75 899 btinfo = require_btrace ();
afedecd3 900
23a7fe75
MM
901 found = btrace_find_insn_by_number (&begin, btinfo, low);
902 if (found == 0)
903 error (_("Range out of bounds."));
afedecd3 904
23a7fe75
MM
905 found = btrace_find_insn_by_number (&end, btinfo, high);
906 if (found == 0)
0688d04e
MM
907 {
908 /* Silently truncate the range. */
909 btrace_insn_end (&end, btinfo);
910 }
911 else
912 {
913 /* We want both begin and end to be inclusive. */
914 btrace_insn_next (&end, 1);
915 }
afedecd3 916
31fd9caa 917 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 918 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
919
920 do_cleanups (uiout_cleanup);
921}
922
923/* The to_insn_history_from method of target record-btrace. */
924
925static void
9abc3ff3
TT
926record_btrace_insn_history_from (struct target_ops *self,
927 ULONGEST from, int size, int flags)
afedecd3
MM
928{
929 ULONGEST begin, end, context;
930
931 context = abs (size);
0688d04e
MM
932 if (context == 0)
933 error (_("Bad record instruction-history-size."));
afedecd3
MM
934
935 if (size < 0)
936 {
937 end = from;
938
939 if (from < context)
940 begin = 0;
941 else
0688d04e 942 begin = from - context + 1;
afedecd3
MM
943 }
944 else
945 {
946 begin = from;
0688d04e 947 end = from + context - 1;
afedecd3
MM
948
949 /* Check for wrap-around. */
950 if (end < begin)
951 end = ULONGEST_MAX;
952 }
953
4e99c6b7 954 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
955}
956
957/* Print the instruction number range for a function call history line. */
958
959static void
23a7fe75
MM
960btrace_call_history_insn_range (struct ui_out *uiout,
961 const struct btrace_function *bfun)
afedecd3 962{
7acbe133
MM
963 unsigned int begin, end, size;
964
965 size = VEC_length (btrace_insn_s, bfun->insn);
966 gdb_assert (size > 0);
afedecd3 967
23a7fe75 968 begin = bfun->insn_offset;
7acbe133 969 end = begin + size - 1;
afedecd3 970
23a7fe75 971 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 972 uiout->text (",");
23a7fe75 973 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
974}
975
ce0dfbea
MM
976/* Compute the lowest and highest source line for the instructions in BFUN
977 and return them in PBEGIN and PEND.
978 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
979 result from inlining or macro expansion. */
980
981static void
982btrace_compute_src_line_range (const struct btrace_function *bfun,
983 int *pbegin, int *pend)
984{
985 struct btrace_insn *insn;
986 struct symtab *symtab;
987 struct symbol *sym;
988 unsigned int idx;
989 int begin, end;
990
991 begin = INT_MAX;
992 end = INT_MIN;
993
994 sym = bfun->sym;
995 if (sym == NULL)
996 goto out;
997
998 symtab = symbol_symtab (sym);
999
1000 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
1001 {
1002 struct symtab_and_line sal;
1003
1004 sal = find_pc_line (insn->pc, 0);
1005 if (sal.symtab != symtab || sal.line == 0)
1006 continue;
1007
325fac50
PA
1008 begin = std::min (begin, sal.line);
1009 end = std::max (end, sal.line);
ce0dfbea
MM
1010 }
1011
1012 out:
1013 *pbegin = begin;
1014 *pend = end;
1015}
1016
afedecd3
MM
1017/* Print the source line information for a function call history line. */
1018
1019static void
23a7fe75
MM
1020btrace_call_history_src_line (struct ui_out *uiout,
1021 const struct btrace_function *bfun)
afedecd3
MM
1022{
1023 struct symbol *sym;
23a7fe75 1024 int begin, end;
afedecd3
MM
1025
1026 sym = bfun->sym;
1027 if (sym == NULL)
1028 return;
1029
112e8700 1030 uiout->field_string ("file",
08be3fe3 1031 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1032
ce0dfbea 1033 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1034 if (end < begin)
afedecd3
MM
1035 return;
1036
112e8700
SM
1037 uiout->text (":");
1038 uiout->field_int ("min line", begin);
afedecd3 1039
23a7fe75 1040 if (end == begin)
afedecd3
MM
1041 return;
1042
112e8700
SM
1043 uiout->text (",");
1044 uiout->field_int ("max line", end);
afedecd3
MM
1045}
1046
0b722aec
MM
1047/* Get the name of a branch trace function. */
1048
1049static const char *
1050btrace_get_bfun_name (const struct btrace_function *bfun)
1051{
1052 struct minimal_symbol *msym;
1053 struct symbol *sym;
1054
1055 if (bfun == NULL)
1056 return "??";
1057
1058 msym = bfun->msym;
1059 sym = bfun->sym;
1060
1061 if (sym != NULL)
1062 return SYMBOL_PRINT_NAME (sym);
1063 else if (msym != NULL)
efd66ac6 1064 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1065 else
1066 return "??";
1067}
1068
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment between BEGIN (inclusive) and END
   (exclusive): the segment index, optional call-depth indentation, the
   function name, and - depending on FLAGS - the instruction range and
   source line.  Trace gaps are reported via btrace_ui_out_decode_error.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the accumulated call depth.  BTINFO->LEVEL shifts
	     BFUN->LEVEL so the shallowest frame prints at column zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Prefer the debug symbol, fall back to the minimal symbol; only
	 print the "??" placeholder for CLI output.  */
      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.

   Print SIZE function segments, continuing from the previously shown
   window (or starting from the replay/trace position on the first call).
   A negative SIZE moves backwards.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history ("func history" elsewhere) - looks like a copy/paste
     leftover, but the MI field name is user-visible so it is kept.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.

   Print the function segments numbered FROM..TO (both inclusive).  A TO
   beyond the end of the trace is silently truncated; an empty or
   wrapped-around range is an error.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  Segment numbers are unsigned int; reject
     arguments that do not survive the narrowing conversion.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1288
1289/* The to_call_history_from method of target record-btrace. */
1290
1291static void
ec0aea04 1292record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1293 ULONGEST from, int size,
1294 int int_flags)
afedecd3
MM
1295{
1296 ULONGEST begin, end, context;
8d297bbf 1297 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1298
1299 context = abs (size);
0688d04e
MM
1300 if (context == 0)
1301 error (_("Bad record function-call-history-size."));
afedecd3
MM
1302
1303 if (size < 0)
1304 {
1305 end = from;
1306
1307 if (from < context)
1308 begin = 0;
1309 else
0688d04e 1310 begin = from - context + 1;
afedecd3
MM
1311 }
1312 else
1313 {
1314 begin = from;
0688d04e 1315 end = from + context - 1;
afedecd3
MM
1316
1317 /* Check for wrap-around. */
1318 if (end < begin)
1319 end = ULONGEST_MAX;
1320 }
1321
f0d960ea 1322 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1323}

/* The to_record_is_replaying method of target record-btrace.

   Return non-zero iff any non-exited thread matching PTID is currently
   replaying its execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}
1338
7ff27e9b
MM
1339/* The to_record_will_replay method of target record-btrace. */
1340
1341static int
1342record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1343{
1344 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1345}
1346
633785ff
MM
1347/* The to_xfer_partial method of target record-btrace. */
1348
9b409511 1349static enum target_xfer_status
633785ff
MM
1350record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1351 const char *annex, gdb_byte *readbuf,
1352 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1353 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1354{
1355 struct target_ops *t;
1356
1357 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1358 if (replay_memory_access == replay_memory_access_read_only
aef92902 1359 && !record_btrace_generating_corefile
4d10e986 1360 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1361 {
1362 switch (object)
1363 {
1364 case TARGET_OBJECT_MEMORY:
1365 {
1366 struct target_section *section;
1367
1368 /* We do not allow writing memory in general. */
1369 if (writebuf != NULL)
9b409511
YQ
1370 {
1371 *xfered_len = len;
bc113b4e 1372 return TARGET_XFER_UNAVAILABLE;
9b409511 1373 }
633785ff
MM
1374
1375 /* We allow reading readonly memory. */
1376 section = target_section_by_addr (ops, offset);
1377 if (section != NULL)
1378 {
1379 /* Check if the section we found is readonly. */
1380 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1381 section->the_bfd_section)
1382 & SEC_READONLY) != 0)
1383 {
1384 /* Truncate the request to fit into this section. */
325fac50 1385 len = std::min (len, section->endaddr - offset);
633785ff
MM
1386 break;
1387 }
1388 }
1389
9b409511 1390 *xfered_len = len;
bc113b4e 1391 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1392 }
1393 }
1394 }
1395
1396 /* Forward the request. */
e75fdfca
TT
1397 ops = ops->beneath;
1398 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1399 offset, len, xfered_len);
633785ff
MM
1400}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1465
1f3ef581
MM
1466/* The to_fetch_registers method of target record-btrace. */
1467
1468static void
1469record_btrace_fetch_registers (struct target_ops *ops,
1470 struct regcache *regcache, int regno)
1471{
1472 struct btrace_insn_iterator *replay;
1473 struct thread_info *tp;
1474
1475 tp = find_thread_ptid (inferior_ptid);
1476 gdb_assert (tp != NULL);
1477
1478 replay = tp->btrace.replay;
aef92902 1479 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1480 {
1481 const struct btrace_insn *insn;
1482 struct gdbarch *gdbarch;
1483 int pcreg;
1484
1485 gdbarch = get_regcache_arch (regcache);
1486 pcreg = gdbarch_pc_regnum (gdbarch);
1487 if (pcreg < 0)
1488 return;
1489
1490 /* We can only provide the PC register. */
1491 if (regno >= 0 && regno != pcreg)
1492 return;
1493
1494 insn = btrace_insn_get (replay);
1495 gdb_assert (insn != NULL);
1496
1497 regcache_raw_supply (regcache, regno, &insn->pc);
1498 }
1499 else
1500 {
e75fdfca 1501 struct target_ops *t = ops->beneath;
1f3ef581 1502
e75fdfca 1503 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1504 }
1505}
1506
1507/* The to_store_registers method of target record-btrace. */
1508
1509static void
1510record_btrace_store_registers (struct target_ops *ops,
1511 struct regcache *regcache, int regno)
1512{
1513 struct target_ops *t;
1514
a52eab48 1515 if (!record_btrace_generating_corefile
4d10e986
MM
1516 && record_btrace_is_replaying (ops, inferior_ptid))
1517 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1518
1519 gdb_assert (may_write_registers != 0);
1520
e75fdfca
TT
1521 t = ops->beneath;
1522 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1523}
1524
1525/* The to_prepare_to_store method of target record-btrace. */
1526
1527static void
1528record_btrace_prepare_to_store (struct target_ops *ops,
1529 struct regcache *regcache)
1530{
1531 struct target_ops *t;
1532
a52eab48 1533 if (!record_btrace_generating_corefile
4d10e986 1534 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1535 return;
1536
e75fdfca
TT
1537 t = ops->beneath;
1538 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1539}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  Entries hash on their frame
   pointer.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  Two entries are equal iff they
   describe the same frame.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache for FRAME, register it in BFCACHE, and
   return it.  The entry is obstack-allocated on the frame obstack, so its
   lifetime is tied to FRAME; FRAME must not already have an entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.
   Returns NULL if FRAME has no entry in BFCACHE (i.e. it is not one of
   our frames).  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}
1620
cecac1ab
MM
1621/* Implement stop_reason method for record_btrace_frame_unwind. */
1622
1623static enum unwind_stop_reason
1624record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1625 void **this_cache)
1626{
0b722aec
MM
1627 const struct btrace_frame_cache *cache;
1628 const struct btrace_function *bfun;
1629
19ba03f4 1630 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1631 bfun = cache->bfun;
1632 gdb_assert (bfun != NULL);
1633
1634 if (bfun->up == NULL)
1635 return UNWIND_UNAVAILABLE;
1636
1637 return UNWIND_NO_REASON;
cecac1ab
MM
1638}

/* Implement this_id method for record_btrace_frame_unwind.

   Stack contents are not recorded, so build an unavailable-stack id
   keyed on the function's entry PC and the number of the function's
   first trace segment.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so repeated segments
     of the same function instance get the same id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the caller's
   trace segment.  Everything else is reported as unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link was created by a return: the caller resumes at its
	 segment's first recorded instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link was created by a call: the caller resumes after the
	 last recorded instruction (the call itself).  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame when the thread is replaying, and outer
   (non-tailcall) frames whose callee frame we already claimed.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up link, unless that link
	 represents a tail call (handled by the tailcall sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1767
1768/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1769
1770static int
1771record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1772 struct frame_info *this_frame,
1773 void **this_cache)
1774{
1775 const struct btrace_function *bfun, *callee;
1776 struct btrace_frame_cache *cache;
1777 struct frame_info *next;
1778
1779 next = get_next_frame (this_frame);
1780 if (next == NULL)
1781 return 0;
1782
1783 callee = btrace_get_frame_function (next);
1784 if (callee == NULL)
1785 return 0;
1786
1787 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1788 return 0;
1789
1790 bfun = callee->up;
1791 if (bfun == NULL)
1792 return 0;
1793
1794 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1795 btrace_get_bfun_name (bfun), bfun->level);
1796
1797 /* This is our frame. Initialize the frame cache. */
1798 cache = bfcache_new (this_frame);
1799 cache->tp = find_thread_ptid (inferior_ptid);
1800 cache->bfun = bfun;
1801
1802 *this_cache = cache;
1803 return 1;
1804}
1805
1806static void
1807record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1808{
1809 struct btrace_frame_cache *cache;
1810 void **slot;
1811
19ba03f4 1812 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1813
1814 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1815 gdb_assert (slot != NULL);
1816
1817 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1818}

/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Same as above, for artificial tail-call frames.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1863
987e68b1
MM
1864/* Return a human-readable string for FLAG. */
1865
1866static const char *
1867btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1868{
1869 switch (flag)
1870 {
1871 case BTHR_STEP:
1872 return "step";
1873
1874 case BTHR_RSTEP:
1875 return "reverse-step";
1876
1877 case BTHR_CONT:
1878 return "cont";
1879
1880 case BTHR_RCONT:
1881 return "reverse-cont";
1882
1883 case BTHR_STOP:
1884 return "stop";
1885 }
1886
1887 return "<invalid>";
1888}

/* Indicate that TP should be resumed according to FLAG.

   Only records the intent in TP's btrace flags; the actual stepping is
   done later, in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.

   Allocates and installs TP's replay iterator, positioned at the last
   non-gap instruction of the trace, and fixes up stepping-related frame
   ids so step-into-subroutine detection keeps working under the btrace
   unwinder.  Returns the new iterator, or NULL if TP has no trace.
   On error, the partially installed state is rolled back.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial installation before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2041
2042/* Stop replaying a thread. */
2043
2044static void
2045record_btrace_stop_replaying (struct thread_info *tp)
2046{
2047 struct btrace_thread_info *btinfo;
2048
2049 btinfo = &tp->btrace;
2050
2051 xfree (btinfo->replay);
2052 btinfo->replay = NULL;
2053
2054 /* Make sure we're not leaving any stale registers. */
2055 registers_changed_ptid (tp->ptid);
2056}
2057
e3cfc1c7
MM
2058/* Stop replaying TP if it is at the end of its execution history. */
2059
2060static void
2061record_btrace_stop_replaying_at_end (struct thread_info *tp)
2062{
2063 struct btrace_insn_iterator *replay, end;
2064 struct btrace_thread_info *btinfo;
2065
2066 btinfo = &tp->btrace;
2067 replay = btinfo->replay;
2068
2069 if (replay == NULL)
2070 return;
2071
2072 btrace_insn_end (&end, btinfo);
2073
2074 if (btrace_insn_cmp (replay, &end) == 0)
2075 record_btrace_stop_replaying (tp);
2076}

/* The to_resume method of target record-btrace.

   When not replaying and moving forwards, simply forwards to the target
   beneath.  Otherwise records per-thread move intents (the actual
   stepping happens in record_btrace_wait) and kicks the async event
   handler.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2153
85ad3aaf
PA
2154/* The to_commit_resume method of target record-btrace. */
2155
2156static void
2157record_btrace_commit_resume (struct target_ops *ops)
2158{
2159 if ((execution_direction != EXEC_REVERSE)
2160 && !record_btrace_is_replaying (ops, minus_one_ptid))
2161 ops->beneath->to_commit_resume (ops->beneath);
2162}

/* Cancel resuming TP.

   Clears any pending move/stop intent and stops replaying if TP already
   reached the end of its history.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2183
2184/* Return a target_waitstatus indicating that we ran out of history. */
2185
2186static struct target_waitstatus
2187btrace_step_no_history (void)
2188{
2189 struct target_waitstatus status;
2190
2191 status.kind = TARGET_WAITKIND_NO_HISTORY;
2192
2193 return status;
2194}
2195
2196/* Return a target_waitstatus indicating that a step finished. */
2197
2198static struct target_waitstatus
2199btrace_step_stopped (void)
2200{
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_STOPPED;
2204 status.value.sig = GDB_SIGNAL_TRAP;
2205
2206 return status;
2207}
2208
6e4879f0
MM
2209/* Return a target_waitstatus indicating that a thread was stopped as
2210 requested. */
2211
2212static struct target_waitstatus
2213btrace_step_stopped_on_request (void)
2214{
2215 struct target_waitstatus status;
2216
2217 status.kind = TARGET_WAITKIND_STOPPED;
2218 status.value.sig = GDB_SIGNAL_0;
2219
2220 return status;
2221}
2222
d825d248
MM
2223/* Return a target_waitstatus indicating a spurious stop. */
2224
2225static struct target_waitstatus
2226btrace_step_spurious (void)
2227{
2228 struct target_waitstatus status;
2229
2230 status.kind = TARGET_WAITKIND_SPURIOUS;
2231
2232 return status;
2233}
2234
e3cfc1c7
MM
2235/* Return a target_waitstatus indicating that the thread was not resumed. */
2236
2237static struct target_waitstatus
2238btrace_step_no_resumed (void)
2239{
2240 struct target_waitstatus status;
2241
2242 status.kind = TARGET_WAITKIND_NO_RESUMED;
2243
2244 return status;
2245}
2246
2247/* Return a target_waitstatus indicating that we should wait again. */
2248
2249static struct target_waitstatus
2250btrace_step_again (void)
2251{
2252 struct target_waitstatus status;
2253
2254 status.kind = TARGET_WAITKIND_IGNORE;
2255
2256 return status;
2257}
2258
52834460
MM
2259/* Clear the record histories. */
2260
2261static void
2262record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2263{
2264 xfree (btinfo->insn_history);
2265 xfree (btinfo->call_history);
2266
2267 btinfo->insn_history = NULL;
2268 btinfo->call_history = NULL;
2269}
2270
3c615f99
MM
2271/* Check whether TP's current replay position is at a breakpoint. */
2272
2273static int
2274record_btrace_replay_at_breakpoint (struct thread_info *tp)
2275{
2276 struct btrace_insn_iterator *replay;
2277 struct btrace_thread_info *btinfo;
2278 const struct btrace_insn *insn;
2279 struct inferior *inf;
2280
2281 btinfo = &tp->btrace;
2282 replay = btinfo->replay;
2283
2284 if (replay == NULL)
2285 return 0;
2286
2287 insn = btrace_insn_get (replay);
2288 if (insn == NULL)
2289 return 0;
2290
2291 inf = find_inferior_ptid (tp->ptid);
2292 if (inf == NULL)
2293 return 0;
2294
2295 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2296 &btinfo->stop_reason);
2297}
2298
/* Step one instruction in forward direction.

   Returns a NO_HISTORY waitstatus if TP is not replaying or reached the end
   of its execution history, STOPPED if the step hit a breakpoint, and
   SPURIOUS if the step succeeded without an event.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying. */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint. */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history. */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the replay position so a later step attempt starts
	     from where the user left off.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace. */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier. */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2347
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying already.  Returns a NO_HISTORY
   waitstatus at the beginning of the execution history, STOPPED if the
   reverse step landed on a breakpoint, and SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so. */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay. If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the replay position before reporting no-history.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step. There is logic in
     infrun.c that handles reverse-stepping separately. See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction. */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2394
/* Step a single thread.

   Consumes TP's pending move/stop request and performs one stepping
   increment in the requested direction.  For continue-style requests the
   request is re-armed and IGNORE is returned so the caller loops.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request; it is re-set below if the thread
     should keep moving.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history. */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* A spurious step means the single step completed normally.  */
      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing: re-arm the request and ask to be stepped again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history. The to_wait
     method will stop the thread for whom the event is reported. */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2463
e3cfc1c7
MM
/* A vector of threads. */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);

/* Announce further events if necessary.

   If either MOVING or NO_HISTORY still contains threads, mark the async
   event handler so record_btrace_wait gets called again for them.  */

static void
record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
				      const VEC (tp_t) *no_history)
{
  int more_moving, more_no_history;

  more_moving = !VEC_empty (tp_t, moving);
  more_no_history = !VEC_empty (tp_t, no_history);

  if (!more_moving && !more_no_history)
    return;

  if (more_moving)
    DEBUG ("movers pending");

  if (more_no_history)
    DEBUG ("no-history pending");

  mark_async_event_handler (record_btrace_async_inferior_event_handler);
}
2491
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   When not replaying, forwards to the target beneath.  When replaying,
   steps all matching threads round-robin until one of them reports an
   event, and returns that thread's ptid with *STATUS filled in.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request. */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  /* The cleanups free both vectors on every exit path below.  */
  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads. */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others. If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time. And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads. But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report. By this time, all threads should have moved to
     either the beginning or the end of their execution history. There will
     be a single user-visible stop. */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread. This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty. */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history. Stop
	 EVENTING now that we are going to report its stop. */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history. Stop
     replaying EVENTING now that we are going to report its stop. */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events. */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position. */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers. */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2626
6e4879f0
MM
2627/* The to_stop method of target record-btrace. */
2628
2629static void
2630record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2631{
2632 DEBUG ("stop %s", target_pid_to_str (ptid));
2633
2634 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2635 if ((execution_direction != EXEC_REVERSE)
2636 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2637 {
2638 ops = ops->beneath;
2639 ops->to_stop (ops, ptid);
2640 }
2641 else
2642 {
2643 struct thread_info *tp;
2644
2645 ALL_NON_EXITED_THREADS (tp)
2646 if (ptid_match (tp->ptid, ptid))
2647 {
2648 tp->btrace.flags &= ~BTHR_MOVE;
2649 tp->btrace.flags |= BTHR_STOP;
2650 }
2651 }
2652 }
2653
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.

   Branch tracing always supports reverse execution by replaying the
   recorded instruction history.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2661
9e8915c6 2662/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2663
9e8915c6
PA
2664static int
2665record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2666{
a52eab48 2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2668 {
2669 struct thread_info *tp = inferior_thread ();
2670
2671 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2672 }
2673
2674 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2675}
2676
2677/* The to_supports_stopped_by_sw_breakpoint method of target
2678 record-btrace. */
2679
2680static int
2681record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2682{
a52eab48 2683 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2684 return 1;
2685
2686 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2687}
2688
2689/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2690
2691static int
2692record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2693{
a52eab48 2694 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2695 {
2696 struct thread_info *tp = inferior_thread ();
2697
2698 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2699 }
2700
2701 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2702}
2703
2704/* The to_supports_stopped_by_hw_breakpoint method of target
2705 record-btrace. */
2706
2707static int
2708record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2709{
a52eab48 2710 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2711 return 1;
52834460 2712
9e8915c6 2713 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2714}
2715
e8032dde 2716/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2717
2718static void
e8032dde 2719record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2720{
e8032dde 2721 /* We don't add or remove threads during replay. */
a52eab48 2722 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2723 return;
2724
2725 /* Forward the request. */
e75fdfca 2726 ops = ops->beneath;
e8032dde 2727 ops->to_update_thread_list (ops);
e2887aa3
MM
2728}
2729
2730/* The to_thread_alive method of target record-btrace. */
2731
2732static int
2733record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2734{
2735 /* We don't add or remove threads during replay. */
a52eab48 2736 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2737 return find_thread_ptid (ptid) != NULL;
2738
2739 /* Forward the request. */
e75fdfca
TT
2740 ops = ops->beneath;
2741 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2742}
2743
066ce621
MM
/* Set the replay branch trace instruction iterator. If IT is NULL, replay
   is stopped.

   Clears the record histories, updates stop_pc, and re-prints the current
   frame so the user sees the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position. */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2774
2775/* The to_goto_record_begin method of target record-btrace. */
2776
2777static void
08475817 2778record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2779{
2780 struct thread_info *tp;
2781 struct btrace_insn_iterator begin;
2782
2783 tp = require_btrace_thread ();
2784
2785 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2786
2787 /* Skip gaps at the beginning of the trace. */
2788 while (btrace_insn_get (&begin) == NULL)
2789 {
2790 unsigned int steps;
2791
2792 steps = btrace_insn_next (&begin, 1);
2793 if (steps == 0)
2794 error (_("No trace."));
2795 }
2796
066ce621 2797 record_btrace_set_replay (tp, &begin);
066ce621
MM
2798}
2799
2800/* The to_goto_record_end method of target record-btrace. */
2801
2802static void
307a1b91 2803record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2804{
2805 struct thread_info *tp;
2806
2807 tp = require_btrace_thread ();
2808
2809 record_btrace_set_replay (tp, NULL);
066ce621
MM
2810}
2811
2812/* The to_goto_record method of target record-btrace. */
2813
2814static void
606183ac 2815record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2816{
2817 struct thread_info *tp;
2818 struct btrace_insn_iterator it;
2819 unsigned int number;
2820 int found;
2821
2822 number = insn;
2823
2824 /* Check for wrap-arounds. */
2825 if (number != insn)
2826 error (_("Instruction number out of range."));
2827
2828 tp = require_btrace_thread ();
2829
2830 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2831 if (found == 0)
2832 error (_("No such instruction."));
2833
2834 record_btrace_set_replay (tp, &it);
066ce621
MM
2835}
2836
797094dd
MM
/* The to_record_stop_replaying method of target record-btrace.

   Stops replaying on every live thread. */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2847
70ad5bff
MM
/* The to_execution_direction target method.

   Reports the direction of the last resume request so the core steps the
   replay iterator the right way.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.

   While the flag is set, memory reads bypass the replay restrictions so a
   consistent core file can be written.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method. */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2871
afedecd3
MM
/* Initialize the record-btrace target ops. */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Open/close and process life-cycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  /* Replay state queries.  */
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory, breakpoints and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  /* Navigation within the recorded history.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2931
f4abbc16
MM
2932/* Start recording in BTS format. */
2933
2934static void
2935cmd_record_btrace_bts_start (char *args, int from_tty)
2936{
f4abbc16
MM
2937 if (args != NULL && *args != 0)
2938 error (_("Invalid argument."));
2939
2940 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2941
492d29ea
PA
2942 TRY
2943 {
2944 execute_command ("target record-btrace", from_tty);
2945 }
2946 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2947 {
2948 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2949 throw_exception (exception);
2950 }
492d29ea 2951 END_CATCH
f4abbc16
MM
2952}
2953
bc504a31 2954/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2955
2956static void
b20a6524 2957cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2958{
2959 if (args != NULL && *args != 0)
2960 error (_("Invalid argument."));
2961
b20a6524 2962 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2963
492d29ea
PA
2964 TRY
2965 {
2966 execute_command ("target record-btrace", from_tty);
2967 }
2968 CATCH (exception, RETURN_MASK_ALL)
2969 {
2970 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2971 throw_exception (exception);
2972 }
2973 END_CATCH
afedecd3
MM
2974}
2975
b20a6524
MM
2976/* Alias for "target record". */
2977
2978static void
2979cmd_record_btrace_start (char *args, int from_tty)
2980{
2981 if (args != NULL && *args != 0)
2982 error (_("Invalid argument."));
2983
2984 record_btrace_conf.format = BTRACE_FORMAT_PT;
2985
2986 TRY
2987 {
2988 execute_command ("target record-btrace", from_tty);
2989 }
2990 CATCH (exception, RETURN_MASK_ALL)
2991 {
2992 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2993
2994 TRY
2995 {
2996 execute_command ("target record-btrace", from_tty);
2997 }
2998 CATCH (exception, RETURN_MASK_ALL)
2999 {
3000 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3001 throw_exception (exception);
3002 }
3003 END_CATCH
3004 }
3005 END_CATCH
3006}
3007
67b5c0c1
MM
/* The "set record btrace" command.

   With no subcommand, show the current settings (GDB convention for
   prefix-only invocation).  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command. */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
3023
3024/* The "show record btrace replay-memory-access" command. */
3025
3026static void
3027cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3028 struct cmd_list_element *c, const char *value)
3029{
3030 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3031 replay_memory_access);
3032}
3033
d33501a5
MM
/* The "set record btrace bts" command.

   A bare prefix invocation: print usage and the list of subcommands.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}

/* The "show record btrace bts" command. */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}

/* The "set record btrace pt" command. */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}

/* The "show record btrace pt" command. */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}

/* The "record bts buffer-size" show value function. */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}

/* The "record pt buffer-size" show value function. */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3093
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family, its "set/show" options,
   the target itself, and the frame cache used by the unwinder.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its format subcommands.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* BTS-specific options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT-specific options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Frame cache used by the btrace unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.539556 seconds and 4 git commands to generate.