Add record_start and record_stop functions.
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   Non-NULL while automatic tracing of new threads is active.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
/* Enable branch tracing for one thread.  Warn on errors.

   This is used as a new-thread observer callback; errors are downgraded to
   warnings so a failure to trace one thread does not abort the operation
   that created it.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Keep the observer handle so record_btrace_auto_disable can detach it.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
/* The record-btrace async event handler function.

   Dispatches to the common inferior event handling; DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
205
c0272db5
TW
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  /* Start tracing threads that are created from now on.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Announce the new record target to interested observers.  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}
225
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS optionally selects the threads to trace by global thread number;
   an empty argument traces all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* If enabling fails for any thread, the cleanup chain disables tracing
     for all threads enabled so far before the error propagates.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
256
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Detach the new-thread observer first so no thread gets (re-)enabled
     while we are disabling the existing ones.  */
  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
272
c0272db5
TW
/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  /* Capture the target beneath before unpushing ourselves.  */
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}
287
afedecd3
MM
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit, *SIZE is scaled down to
   that unit and its suffix is returned; otherwise *SIZE is left unchanged
   and the empty string is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Units in descending order; the first exact match wins.  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int value, i;

  value = *size;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    {
      unsigned int mask = (1u << units[i].shift) - 1;

      if ((value & mask) == 0)
	{
	  *size = value >> units[i].shift;
	  return units[i].suffix;
	}
    }

  return "";
}
348
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the recording configuration and per-thread trace statistics for
   the current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/instruction is the total count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
464
31fd9caa
MM
465/* Print a decode error. */
466
467static void
468btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470{
508352a9 471 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 472
112e8700 473 uiout->text (_("["));
508352a9
TW
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 476 {
112e8700
SM
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
31fd9caa 480 }
112e8700
SM
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
31fd9caa
MM
483}
484
afedecd3
MM
/* Print an unsigned int.

   Helper for emitting an unsigned value as a ui-out field named FLD;
   ui_out has no dedicated unsigned field method, so format it via "%u".  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}
492
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range result;

  result.symtab = symtab;
  result.begin = begin;
  result.end = end;

  return result;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.begin >= range.end)
    {
      /* The range was empty; start it at LINE.  */
      range.begin = line;
      range.end = line + 1;
      return range;
    }

  /* Widen the range to cover LINE where necessary.  */
  if (line < range.begin)
    range.begin = line;
  else if (line > range.end)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return !(range.begin < range.end);
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  if (lhs.symtab != rhs.symtab)
    return 0;

  return lhs.begin <= rhs.begin && rhs.end <= lhs.end;
}
558
559/* Find the line range associated with PC. */
560
561static struct btrace_line_range
562btrace_find_line_range (CORE_ADDR pc)
563{
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591}
592
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple for the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      /* Open a "src_and_asm_line" tuple for this source line ...  */
      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* ... and a list that the caller fills with the line's insns.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
626
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   Prints the instructions in [BEGIN; END) to UIOUT, interleaving source
   lines when DISASSEMBLY_SOURCE is set in FLAGS and marking trace gaps
   with decode-error annotations.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already covered by the previously printed range.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}
719
/* The to_insn_history method of target record-btrace.

   SIZE gives the number of instructions to print; its sign gives the
   direction (negative: backwards).  Successive calls continue from the
   last printed range, which is remembered in the thread's btrace info.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed range.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next call.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
805
/* The to_insn_history_range method of target record-btrace.

   Prints the instructions numbered [FROM; TO], both inclusive.  An
   out-of-bounds upper end is silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
858
859/* The to_insn_history_from method of target record-btrace. */
860
861static void
9abc3ff3
TT
862record_btrace_insn_history_from (struct target_ops *self,
863 ULONGEST from, int size, int flags)
afedecd3
MM
864{
865 ULONGEST begin, end, context;
866
867 context = abs (size);
0688d04e
MM
868 if (context == 0)
869 error (_("Bad record instruction-history-size."));
afedecd3
MM
870
871 if (size < 0)
872 {
873 end = from;
874
875 if (from < context)
876 begin = 0;
877 else
0688d04e 878 begin = from - context + 1;
afedecd3
MM
879 }
880 else
881 {
882 begin = from;
0688d04e 883 end = from + context - 1;
afedecd3
MM
884
885 /* Check for wrap-around. */
886 if (end < begin)
887 end = ULONGEST_MAX;
888 }
889
4e99c6b7 890 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
891}
892
/* Print the instruction number range for a function call history line.

   Prints "begin,end", both inclusive, as ui-out fields.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}
911
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Start with an inverted range; if nothing maps to BFUN's symtab the
     caller sees *PEND < *PBEGIN and can treat the range as empty.  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions from other symtabs or without line info.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
952
afedecd3
MM
953/* Print the source line information for a function call history line. */
954
955static void
23a7fe75
MM
956btrace_call_history_src_line (struct ui_out *uiout,
957 const struct btrace_function *bfun)
afedecd3
MM
958{
959 struct symbol *sym;
23a7fe75 960 int begin, end;
afedecd3
MM
961
962 sym = bfun->sym;
963 if (sym == NULL)
964 return;
965
112e8700 966 uiout->field_string ("file",
08be3fe3 967 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 968
ce0dfbea 969 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 970 if (end < begin)
afedecd3
MM
971 return;
972
112e8700
SM
973 uiout->text (":");
974 uiout->field_int ("min line", begin);
afedecd3 975
23a7fe75 976 if (end == begin)
afedecd3
MM
977 return;
978
112e8700
SM
979 uiout->text (",");
980 uiout->field_int ("max line", end);
afedecd3
MM
981}
982
0b722aec
MM
983/* Get the name of a branch trace function. */
984
985static const char *
986btrace_get_bfun_name (const struct btrace_function *bfun)
987{
988 struct minimal_symbol *msym;
989 struct symbol *sym;
990
991 if (bfun == NULL)
992 return "??";
993
994 msym = bfun->msym;
995 sym = bfun->sym;
996
997 if (sym != NULL)
998 return SYMBOL_PRINT_NAME (sym);
999 else if (msym != NULL)
efd66ac6 1000 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1001 else
1002 return "??";
1003}
1004
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per function in [BEGIN; END) to UIOUT; INT_FLAGS is a
   record_print_flags mask selecting instruction ranges, source lines and
   call indentation.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth, adjusted by the thread's base level.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1079
1080/* The to_call_history method of target record-btrace. */
1081
1082static void
8d297bbf 1083record_btrace_call_history (struct target_ops *self, int size, int int_flags)
afedecd3
MM
1084{
1085 struct btrace_thread_info *btinfo;
23a7fe75
MM
1086 struct btrace_call_history *history;
1087 struct btrace_call_iterator begin, end;
afedecd3
MM
1088 struct cleanup *uiout_cleanup;
1089 struct ui_out *uiout;
23a7fe75 1090 unsigned int context, covered;
8d297bbf 1091 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1092
1093 uiout = current_uiout;
1094 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1095 "insn history");
afedecd3 1096 context = abs (size);
afedecd3
MM
1097 if (context == 0)
1098 error (_("Bad record function-call-history-size."));
1099
23a7fe75
MM
1100 btinfo = require_btrace ();
1101 history = btinfo->call_history;
1102 if (history == NULL)
afedecd3 1103 {
07bbe694 1104 struct btrace_insn_iterator *replay;
afedecd3 1105
8d297bbf 1106 DEBUG ("call-history (0x%x): %d", int_flags, size);
afedecd3 1107
07bbe694
MM
1108 /* If we're replaying, we start at the replay position. Otherwise, we
1109 start at the tail of the trace. */
1110 replay = btinfo->replay;
1111 if (replay != NULL)
1112 {
1113 begin.function = replay->function;
1114 begin.btinfo = btinfo;
1115 }
1116 else
1117 btrace_call_end (&begin, btinfo);
1118
1119 /* We start from here and expand in the requested direction. Then we
1120 expand in the other direction, as well, to fill up any remaining
1121 context. */
1122 end = begin;
1123 if (size < 0)
1124 {
1125 /* We want the current position covered, as well. */
1126 covered = btrace_call_next (&end, 1);
1127 covered += btrace_call_prev (&begin, context - covered);
1128 covered += btrace_call_next (&end, context - covered);
1129 }
1130 else
1131 {
1132 covered = btrace_call_next (&end, context);
1133 covered += btrace_call_prev (&begin, context- covered);
1134 }
afedecd3
MM
1135 }
1136 else
1137 {
23a7fe75
MM
1138 begin = history->begin;
1139 end = history->end;
afedecd3 1140
8d297bbf 1141 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
23a7fe75 1142 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1143
23a7fe75
MM
1144 if (size < 0)
1145 {
1146 end = begin;
1147 covered = btrace_call_prev (&begin, context);
1148 }
1149 else
1150 {
1151 begin = end;
1152 covered = btrace_call_next (&end, context);
1153 }
afedecd3
MM
1154 }
1155
23a7fe75 1156 if (covered > 0)
8710b709 1157 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1158 else
1159 {
1160 if (size < 0)
1161 printf_unfiltered (_("At the start of the branch trace record.\n"));
1162 else
1163 printf_unfiltered (_("At the end of the branch trace record.\n"));
1164 }
afedecd3 1165
23a7fe75 1166 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1167 do_cleanups (uiout_cleanup);
1168}
1169
1170/* The to_call_history_range method of target record-btrace. */
1171
1172static void
f0d960ea 1173record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1174 ULONGEST from, ULONGEST to,
1175 int int_flags)
afedecd3
MM
1176{
1177 struct btrace_thread_info *btinfo;
23a7fe75
MM
1178 struct btrace_call_history *history;
1179 struct btrace_call_iterator begin, end;
afedecd3
MM
1180 struct cleanup *uiout_cleanup;
1181 struct ui_out *uiout;
23a7fe75
MM
1182 unsigned int low, high;
1183 int found;
8d297bbf 1184 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1185
1186 uiout = current_uiout;
1187 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1188 "func history");
23a7fe75
MM
1189 low = from;
1190 high = to;
afedecd3 1191
8d297bbf 1192 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1193
1194 /* Check for wrap-arounds. */
23a7fe75 1195 if (low != from || high != to)
afedecd3
MM
1196 error (_("Bad range."));
1197
0688d04e 1198 if (high < low)
afedecd3
MM
1199 error (_("Bad range."));
1200
23a7fe75 1201 btinfo = require_btrace ();
afedecd3 1202
23a7fe75
MM
1203 found = btrace_find_call_by_number (&begin, btinfo, low);
1204 if (found == 0)
1205 error (_("Range out of bounds."));
afedecd3 1206
23a7fe75
MM
1207 found = btrace_find_call_by_number (&end, btinfo, high);
1208 if (found == 0)
0688d04e
MM
1209 {
1210 /* Silently truncate the range. */
1211 btrace_call_end (&end, btinfo);
1212 }
1213 else
1214 {
1215 /* We want both begin and end to be inclusive. */
1216 btrace_call_next (&end, 1);
1217 }
afedecd3 1218
8710b709 1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1220 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1221
1222 do_cleanups (uiout_cleanup);
1223}
1224
1225/* The to_call_history_from method of target record-btrace. */
1226
1227static void
ec0aea04 1228record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1229 ULONGEST from, int size,
1230 int int_flags)
afedecd3
MM
1231{
1232 ULONGEST begin, end, context;
8d297bbf 1233 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1234
1235 context = abs (size);
0688d04e
MM
1236 if (context == 0)
1237 error (_("Bad record function-call-history-size."));
afedecd3
MM
1238
1239 if (size < 0)
1240 {
1241 end = from;
1242
1243 if (from < context)
1244 begin = 0;
1245 else
0688d04e 1246 begin = from - context + 1;
afedecd3
MM
1247 }
1248 else
1249 {
1250 begin = from;
0688d04e 1251 end = from + context - 1;
afedecd3
MM
1252
1253 /* Check for wrap-around. */
1254 if (end < begin)
1255 end = ULONGEST_MAX;
1256 }
1257
f0d960ea 1258 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1259}
1260
07bbe694
MM
1261/* The to_record_is_replaying method of target record-btrace. */
1262
1263static int
a52eab48 1264record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1265{
1266 struct thread_info *tp;
1267
034f788c 1268 ALL_NON_EXITED_THREADS (tp)
a52eab48 1269 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1270 return 1;
1271
1272 return 0;
1273}
1274
7ff27e9b
MM
1275/* The to_record_will_replay method of target record-btrace. */
1276
1277static int
1278record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1279{
1280 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1281}
1282
633785ff
MM
1283/* The to_xfer_partial method of target record-btrace. */
1284
9b409511 1285static enum target_xfer_status
633785ff
MM
1286record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1287 const char *annex, gdb_byte *readbuf,
1288 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1289 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1290{
1291 struct target_ops *t;
1292
1293 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1294 if (replay_memory_access == replay_memory_access_read_only
aef92902 1295 && !record_btrace_generating_corefile
4d10e986 1296 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1297 {
1298 switch (object)
1299 {
1300 case TARGET_OBJECT_MEMORY:
1301 {
1302 struct target_section *section;
1303
1304 /* We do not allow writing memory in general. */
1305 if (writebuf != NULL)
9b409511
YQ
1306 {
1307 *xfered_len = len;
bc113b4e 1308 return TARGET_XFER_UNAVAILABLE;
9b409511 1309 }
633785ff
MM
1310
1311 /* We allow reading readonly memory. */
1312 section = target_section_by_addr (ops, offset);
1313 if (section != NULL)
1314 {
1315 /* Check if the section we found is readonly. */
1316 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1317 section->the_bfd_section)
1318 & SEC_READONLY) != 0)
1319 {
1320 /* Truncate the request to fit into this section. */
325fac50 1321 len = std::min (len, section->endaddr - offset);
633785ff
MM
1322 break;
1323 }
1324 }
1325
9b409511 1326 *xfered_len = len;
bc113b4e 1327 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1328 }
1329 }
1330 }
1331
1332 /* Forward the request. */
e75fdfca
TT
1333 ops = ops->beneath;
1334 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1335 offset, len, xfered_len);
633785ff
MM
1336}
1337
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can patch the breakpoint into memory.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  /* Restore the access mode on the normal path, too.  */
  replay_memory_access = old;

  return ret;
}
1368
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily lifts the
   replay memory-access restriction so the target beneath can restore the
   original memory contents.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  /* Restore the access mode on the normal path, too.  */
  replay_memory_access = old;

  return ret;
}
1401
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC is known - it is taken from the current
   replay position.  Outside of replay (or when generating a core file),
   the request is forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      /* NOTE(review): when REGNO is -1 (fetch all), -1 is passed through
	 to regcache_raw_supply rather than PCREG - confirm that the
	 regcache handles this as intended.  */
      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1442
1443/* The to_store_registers method of target record-btrace. */
1444
1445static void
1446record_btrace_store_registers (struct target_ops *ops,
1447 struct regcache *regcache, int regno)
1448{
1449 struct target_ops *t;
1450
a52eab48 1451 if (!record_btrace_generating_corefile
4d10e986
MM
1452 && record_btrace_is_replaying (ops, inferior_ptid))
1453 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1454
1455 gdb_assert (may_write_registers != 0);
1456
e75fdfca
TT
1457 t = ops->beneath;
1458 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1459}
1460
1461/* The to_prepare_to_store method of target record-btrace. */
1462
1463static void
1464record_btrace_prepare_to_store (struct target_ops *ops,
1465 struct regcache *regcache)
1466{
1467 struct target_ops *t;
1468
a52eab48 1469 if (!record_btrace_generating_corefile
4d10e986 1470 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1471 return;
1472
e75fdfca
TT
1473 t = ops->beneath;
1474 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1475}
1476
/* The branch trace frame cache.

   Associates a frame_info with the branch trace function segment it was
   built from, so the unwinder callbacks can recover the segment.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash key; see bfcache_hash.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1494
1495/* hash_f for htab_create_alloc of bfcache. */
1496
1497static hashval_t
1498bfcache_hash (const void *arg)
1499{
19ba03f4
SM
1500 const struct btrace_frame_cache *cache
1501 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1502
1503 return htab_hash_pointer (cache->frame);
1504}
1505
1506/* eq_f for htab_create_alloc of bfcache. */
1507
1508static int
1509bfcache_eq (const void *arg1, const void *arg2)
1510{
19ba03f4
SM
1511 const struct btrace_frame_cache *cache1
1512 = (const struct btrace_frame_cache *) arg1;
1513 const struct btrace_frame_cache *cache2
1514 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1515
1516 return cache1->frame == cache2->frame;
1517}
1518
1519/* Create a new btrace frame cache. */
1520
1521static struct btrace_frame_cache *
1522bfcache_new (struct frame_info *frame)
1523{
1524 struct btrace_frame_cache *cache;
1525 void **slot;
1526
1527 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1528 cache->frame = frame;
1529
1530 slot = htab_find_slot (bfcache, cache, INSERT);
1531 gdb_assert (*slot == NULL);
1532 *slot = cache;
1533
1534 return cache;
1535}
1536
1537/* Extract the branch trace function from a branch trace frame. */
1538
1539static const struct btrace_function *
1540btrace_get_frame_function (struct frame_info *frame)
1541{
1542 const struct btrace_frame_cache *cache;
1543 const struct btrace_function *bfun;
1544 struct btrace_frame_cache pattern;
1545 void **slot;
1546
1547 pattern.frame = frame;
1548
1549 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1550 if (slot == NULL)
1551 return NULL;
1552
19ba03f4 1553 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1554 return cache->bfun;
1555}
1556
cecac1ab
MM
1557/* Implement stop_reason method for record_btrace_frame_unwind. */
1558
1559static enum unwind_stop_reason
1560record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1561 void **this_cache)
1562{
0b722aec
MM
1563 const struct btrace_frame_cache *cache;
1564 const struct btrace_function *bfun;
1565
19ba03f4 1566 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1567 bfun = cache->bfun;
1568 gdb_assert (bfun != NULL);
1569
1570 if (bfun->up == NULL)
1571 return UNWIND_UNAVAILABLE;
1572
1573 return UNWIND_NO_REASON;
cecac1ab
MM
1574}
1575
1576/* Implement this_id method for record_btrace_frame_unwind. */
1577
1578static void
1579record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1580 struct frame_id *this_id)
1581{
0b722aec
MM
1582 const struct btrace_frame_cache *cache;
1583 const struct btrace_function *bfun;
1584 CORE_ADDR code, special;
1585
19ba03f4 1586 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1587
1588 bfun = cache->bfun;
1589 gdb_assert (bfun != NULL);
1590
1591 while (bfun->segment.prev != NULL)
1592 bfun = bfun->segment.prev;
1593
1594 code = get_frame_func (this_frame);
1595 special = bfun->number;
1596
1597 *this_id = frame_id_build_unavailable_stack_special (code, special);
1598
1599 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1600 btrace_get_bfun_name (cache->bfun),
1601 core_addr_to_string_nz (this_id->code_addr),
1602 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1603}
1604
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the recorded trace; any other register
   is reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* BFUN was entered via a return into CALLER: the caller's PC is
	 the first instruction of the caller segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* BFUN was entered via a call: the caller's PC is the instruction
	 following the caller's last (i.e. the calling) instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1653
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame while replaying (from the replay position)
   and, for outer frames, the caller segment of the frame sniffed
   below - except across tail calls, which the tailcall sniffer
   handles.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: this frame is the caller of NEXT's function
	 segment, unless NEXT was entered via a tail call.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1703
1704/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1705
1706static int
1707record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1708 struct frame_info *this_frame,
1709 void **this_cache)
1710{
1711 const struct btrace_function *bfun, *callee;
1712 struct btrace_frame_cache *cache;
1713 struct frame_info *next;
1714
1715 next = get_next_frame (this_frame);
1716 if (next == NULL)
1717 return 0;
1718
1719 callee = btrace_get_frame_function (next);
1720 if (callee == NULL)
1721 return 0;
1722
1723 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1724 return 0;
1725
1726 bfun = callee->up;
1727 if (bfun == NULL)
1728 return 0;
1729
1730 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1731 btrace_get_bfun_name (bfun), bfun->level);
1732
1733 /* This is our frame. Initialize the frame cache. */
1734 cache = bfcache_new (this_frame);
1735 cache->tp = find_thread_ptid (inferior_ptid);
1736 cache->bfun = bfun;
1737
1738 *this_cache = cache;
1739 return 1;
1740}
1741
/* Implement dealloc_cache method for record_btrace_frame_unwind.

   Removes the frame's entry from BFCACHE; the entry itself lives on the
   frame obstack and is freed with it.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  /* The entry must have been registered by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1755
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL, /* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Companion unwinder for frames entered via tail calls; identical
   callbacks except for the frame type and the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL, /* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1783
/* Implement the to_get_unwinder method.

   Returns the btrace unwinder for normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1791
/* Implement the to_get_tailcall_unwinder method.

   Returns the btrace unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1799
987e68b1
MM
1800/* Return a human-readable string for FLAG. */
1801
1802static const char *
1803btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1804{
1805 switch (flag)
1806 {
1807 case BTHR_STEP:
1808 return "step";
1809
1810 case BTHR_RSTEP:
1811 return "reverse-step";
1812
1813 case BTHR_CONT:
1814 return "cont";
1815
1816 case BTHR_RCONT:
1817 return "reverse-cont";
1818
1819 case BTHR_STOP:
1820 return "stop";
1821 }
1822
1823 return "<invalid>";
1824}
1825
52834460
MM
1826/* Indicate that TP should be resumed according to FLAG. */
1827
1828static void
1829record_btrace_resume_thread (struct thread_info *tp,
1830 enum btrace_thread_flag flag)
1831{
1832 struct btrace_thread_info *btinfo;
1833
43792cf0 1834 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1835 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1836
1837 btinfo = &tp->btrace;
1838
52834460
MM
1839 /* Fetch the latest branch trace. */
1840 btrace_fetch (tp);
1841
0ca912df
MM
1842 /* A resume request overwrites a preceding resume or stop request. */
1843 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1844 btinfo->flags |= flag;
1845}
1846
/* Get the current frame for TP.

   Temporarily redirects INFERIOR_PTID and clears the thread's executing
   flag so the frame machinery may run; both are restored on all paths,
   including on error.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1895
/* Start replaying a thread.

   Allocates and installs a replay iterator positioned at the end of TP's
   recorded trace (skipping trailing gaps) and fixes up the stepping
   frame ids for the new, trace-based frames.  Returns the iterator, or
   NULL if TP has no trace.  On error, TP is left not replaying.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Roll back: discard the iterator and mark TP as not replaying
	 before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
1977
1978/* Stop replaying a thread. */
1979
1980static void
1981record_btrace_stop_replaying (struct thread_info *tp)
1982{
1983 struct btrace_thread_info *btinfo;
1984
1985 btinfo = &tp->btrace;
1986
1987 xfree (btinfo->replay);
1988 btinfo->replay = NULL;
1989
1990 /* Make sure we're not leaving any stale registers. */
1991 registers_changed_ptid (tp->ptid);
1992}
1993
e3cfc1c7
MM
1994/* Stop replaying TP if it is at the end of its execution history. */
1995
1996static void
1997record_btrace_stop_replaying_at_end (struct thread_info *tp)
1998{
1999 struct btrace_insn_iterator *replay, end;
2000 struct btrace_thread_info *btinfo;
2001
2002 btinfo = &tp->btrace;
2003 replay = btinfo->replay;
2004
2005 if (replay == NULL)
2006 return;
2007
2008 btrace_insn_end (&end, btinfo);
2009
2010 if (btrace_insn_cmp (replay, &end) == 0)
2011 record_btrace_stop_replaying (tp);
2012}
2013
/* The to_resume method of target record-btrace.

   When not replaying and moving forward, the request is forwarded to the
   target beneath.  Otherwise the resume intent is recorded per thread
   (the actual stepping happens in record_btrace_wait) and async
   processing is kicked off if available.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for
     the resumed thread(s), CFLAG for threads that merely continue.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2089
85ad3aaf
PA
2090/* The to_commit_resume method of target record-btrace. */
2091
2092static void
2093record_btrace_commit_resume (struct target_ops *ops)
2094{
2095 if ((execution_direction != EXEC_REVERSE)
2096 && !record_btrace_is_replaying (ops, minus_one_ptid))
2097 ops->beneath->to_commit_resume (ops->beneath);
2098}
2099
/* Cancel resuming TP.

   Clears any pending move/stop request and stops replaying if TP is at
   the end of its history.  No-op if nothing was pending.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  /* Extract the pending move/stop request, if any.  */
  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2119
2120/* Return a target_waitstatus indicating that we ran out of history. */
2121
2122static struct target_waitstatus
2123btrace_step_no_history (void)
2124{
2125 struct target_waitstatus status;
2126
2127 status.kind = TARGET_WAITKIND_NO_HISTORY;
2128
2129 return status;
2130}
2131
2132/* Return a target_waitstatus indicating that a step finished. */
2133
2134static struct target_waitstatus
2135btrace_step_stopped (void)
2136{
2137 struct target_waitstatus status;
2138
2139 status.kind = TARGET_WAITKIND_STOPPED;
2140 status.value.sig = GDB_SIGNAL_TRAP;
2141
2142 return status;
2143}
2144
6e4879f0
MM
2145/* Return a target_waitstatus indicating that a thread was stopped as
2146 requested. */
2147
2148static struct target_waitstatus
2149btrace_step_stopped_on_request (void)
2150{
2151 struct target_waitstatus status;
2152
2153 status.kind = TARGET_WAITKIND_STOPPED;
2154 status.value.sig = GDB_SIGNAL_0;
2155
2156 return status;
2157}
2158
d825d248
MM
2159/* Return a target_waitstatus indicating a spurious stop. */
2160
2161static struct target_waitstatus
2162btrace_step_spurious (void)
2163{
2164 struct target_waitstatus status;
2165
2166 status.kind = TARGET_WAITKIND_SPURIOUS;
2167
2168 return status;
2169}
2170
e3cfc1c7
MM
2171/* Return a target_waitstatus indicating that the thread was not resumed. */
2172
2173static struct target_waitstatus
2174btrace_step_no_resumed (void)
2175{
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_NO_RESUMED;
2179
2180 return status;
2181}
2182
2183/* Return a target_waitstatus indicating that we should wait again. */
2184
2185static struct target_waitstatus
2186btrace_step_again (void)
2187{
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_IGNORE;
2191
2192 return status;
2193}
2194
52834460
MM
2195/* Clear the record histories. */
2196
2197static void
2198record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2199{
2200 xfree (btinfo->insn_history);
2201 xfree (btinfo->call_history);
2202
2203 btinfo->insn_history = NULL;
2204 btinfo->call_history = NULL;
2205}
2206
3c615f99
MM
2207/* Check whether TP's current replay position is at a breakpoint. */
2208
2209static int
2210record_btrace_replay_at_breakpoint (struct thread_info *tp)
2211{
2212 struct btrace_insn_iterator *replay;
2213 struct btrace_thread_info *btinfo;
2214 const struct btrace_insn *insn;
2215 struct inferior *inf;
2216
2217 btinfo = &tp->btrace;
2218 replay = btinfo->replay;
2219
2220 if (replay == NULL)
2221 return 0;
2222
2223 insn = btrace_insn_get (replay);
2224 if (insn == NULL)
2225 return 0;
2226
2227 inf = find_inferior_ptid (tp->ptid);
2228 if (inf == NULL)
2229 return 0;
2230
2231 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2232 &btinfo->stop_reason);
2233}
2234
/* Step one instruction in forward direction.

   Returns a NO_HISTORY status if TP is not replaying or ran out of trace,
   a STOPPED status if the step hit a breakpoint, and a SPURIOUS status
   otherwise.  Advances TP's replay iterator in place.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the starting position so the replay iterator is not
	     left pointing into a trailing gap.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2283
/* Step one instruction in backward direction.

   Starts replaying if TP was not replaying yet.  Returns a NO_HISTORY
   status when the beginning of the trace is reached, a STOPPED status if
   the de-executed instruction was a breakpoint, and a SPURIOUS status
   otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2330
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE/BTHR_STOP request and performs one unit
   of work for it.  Returns IGNORE to request another call (continue modes),
   STOPPED when a step completed or a breakpoint was hit, and NO_HISTORY
   when the trace was exhausted.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request; it is re-armed below for continue modes.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A non-spurious status (stop or no-history) ends the step.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the continue request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2399
e3cfc1c7
MM
/* A vector of threads.  Used by record_btrace_wait to keep work lists of
   moving and no-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2404
a6b5be76
MM
2405/* Announce further events if necessary. */
2406
2407static void
2408record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2409 const VEC (tp_t) *no_history)
2410{
2411 int more_moving, more_no_history;
2412
2413 more_moving = !VEC_empty (tp_t, moving);
2414 more_no_history = !VEC_empty (tp_t, no_history);
2415
2416 if (!more_moving && !more_no_history)
2417 return;
2418
2419 if (more_moving)
2420 DEBUG ("movers pending");
2421
2422 if (more_no_history)
2423 DEBUG ("no-history pending");
2424
2425 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2426}
2427
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   When not replaying, forwards to the target beneath.  When replaying,
   round-robins all moving threads one instruction at a time until one of
   them reports an event, deferring "no execution history" reports until
   nothing else is left to report.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread wants another step; keep it in the work list.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Defer the report; move the thread to the no-history list.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2562
6e4879f0
MM
2563/* The to_stop method of target record-btrace. */
2564
2565static void
2566record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2567{
2568 DEBUG ("stop %s", target_pid_to_str (ptid));
2569
2570 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2571 if ((execution_direction != EXEC_REVERSE)
2572 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2573 {
2574 ops = ops->beneath;
2575 ops->to_stop (ops, ptid);
2576 }
2577 else
2578 {
2579 struct thread_info *tp;
2580
2581 ALL_NON_EXITED_THREADS (tp)
2582 if (ptid_match (tp->ptid, ptid))
2583 {
2584 tp->btrace.flags &= ~BTHR_MOVE;
2585 tp->btrace.flags |= BTHR_STOP;
2586 }
2587 }
2588 }
2589
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The record-btrace target can always replay in reverse.  */
  return 1;
}
2597
9e8915c6 2598/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2599
9e8915c6
PA
2600static int
2601record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2602{
a52eab48 2603 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2604 {
2605 struct thread_info *tp = inferior_thread ();
2606
2607 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2608 }
2609
2610 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2611}
2612
2613/* The to_supports_stopped_by_sw_breakpoint method of target
2614 record-btrace. */
2615
2616static int
2617record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2618{
a52eab48 2619 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2620 return 1;
2621
2622 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2623}
2624
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, report the stop reason recorded for the current
	 thread's replay position.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2639
2640/* The to_supports_stopped_by_hw_breakpoint method of target
2641 record-btrace. */
2642
2643static int
2644record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2645{
a52eab48 2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2647 return 1;
52834460 2648
9e8915c6 2649 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2650}
2651
e8032dde 2652/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2653
2654static void
e8032dde 2655record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2656{
e8032dde 2657 /* We don't add or remove threads during replay. */
a52eab48 2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2659 return;
2660
2661 /* Forward the request. */
e75fdfca 2662 ops = ops->beneath;
e8032dde 2663 ops->to_update_thread_list (ops);
e2887aa3
MM
2664}
2665
2666/* The to_thread_alive method of target record-btrace. */
2667
2668static int
2669record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2670{
2671 /* We don't add or remove threads during replay. */
a52eab48 2672 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2673 return find_thread_ptid (ptid) != NULL;
2674
2675 /* Forward the request. */
e75fdfca
TT
2676 ops = ops->beneath;
2677 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2678}
2679
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the record histories, updates the cached stop PC, and reprints
   the current frame, so the user sees the new replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing changed, so keep the
	   histories and register caches intact.  */
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2710
2711/* The to_goto_record_begin method of target record-btrace. */
2712
2713static void
08475817 2714record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2715{
2716 struct thread_info *tp;
2717 struct btrace_insn_iterator begin;
2718
2719 tp = require_btrace_thread ();
2720
2721 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2722
2723 /* Skip gaps at the beginning of the trace. */
2724 while (btrace_insn_get (&begin) == NULL)
2725 {
2726 unsigned int steps;
2727
2728 steps = btrace_insn_next (&begin, 1);
2729 if (steps == 0)
2730 error (_("No trace."));
2731 }
2732
066ce621 2733 record_btrace_set_replay (tp, &begin);
066ce621
MM
2734}
2735
2736/* The to_goto_record_end method of target record-btrace. */
2737
2738static void
307a1b91 2739record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2740{
2741 struct thread_info *tp;
2742
2743 tp = require_btrace_thread ();
2744
2745 record_btrace_set_replay (tp, NULL);
066ce621
MM
2746}
2747
2748/* The to_goto_record method of target record-btrace. */
2749
2750static void
606183ac 2751record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2752{
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2756 int found;
2757
2758 number = insn;
2759
2760 /* Check for wrap-arounds. */
2761 if (number != insn)
2762 error (_("Instruction number out of range."));
2763
2764 tp = require_btrace_thread ();
2765
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2767
2768 /* Check if the instruction could not be found or is a gap. */
2769 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2770 error (_("No such instruction."));
2771
2772 record_btrace_set_replay (tp, &it);
066ce621
MM
2773}
2774
797094dd
MM
2775/* The to_record_stop_replaying method of target record-btrace. */
2776
2777static void
2778record_btrace_stop_replaying_all (struct target_ops *self)
2779{
2780 struct thread_info *tp;
2781
2782 ALL_NON_EXITED_THREADS (tp)
2783 record_btrace_stop_replaying (tp);
2784}
2785
70ad5bff
MM
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction of the last resume request.  */
  return record_btrace_resume_exec_dir;
}
2793
aef92902
MM
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* Flag that we are dumping a core so other methods can adjust their
     behavior while the dump is in progress.  */
  record_btrace_generating_corefile = 1;
}
2801
/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Core dump finished; back to normal operation.  */
  record_btrace_generating_corefile = 0;
}
2809
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target lifecycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;

  /* Memory, breakpoints, and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2869
f4abbc16
MM
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection if the target could not be pushed, then
	 re-throw so the user sees the original error.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2891
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection if the target could not be pushed, then
	 re-throw so the user sees the original error.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2913
b20a6524
MM
/* Alias for "target record".  Tries the PT format first and falls back to
   BTS if PT is not supported on this system.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; reset and report the BTS error.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2945
67b5c0c1
MM
/* The "set record btrace" command.  Lists its sub-commands.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2953
/* The "show record btrace" command.  Lists its sub-commands.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2961
2962/* The "show record btrace replay-memory-access" command. */
2963
2964static void
2965cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2966 struct cmd_list_element *c, const char *value)
2967{
2968 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2969 replay_memory_access);
2970}
2971
d33501a5
MM
/* The "set record btrace bts" command.  Prints the available
   sub-commands; it takes no arguments of its own.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2982
/* The "show record btrace bts" command.  Lists its sub-commands.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2990
b20a6524
MM
/* The "set record btrace pt" command.  Prints the available
   sub-commands; it takes no arguments of its own.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3001
/* The "show record btrace pt" command.  Lists its sub-commands.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3009
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the pre-formatted setting; just wrap it in a message.  */
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3020
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the pre-formatted setting; just wrap it in a message.  */
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3031
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its format sub-commands.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* Replay memory access policy.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* BTS-specific options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT-specific options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace frame unwinding.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.664621 seconds and 4 git commands to generate.