Use new %p format suffixes in gdb
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
42a4f53d 3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
268a13a5 41#include "gdbsupport/vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
0d12e84c 44#include "gdbarch.h"
e43b10e1 45#include "cli/cli-style.h"
afedecd3 46
d9f719f1
PA
47static const target_info record_btrace_target_info = {
48 "record-btrace",
49 N_("Branch tracing target"),
50 N_("Collect control-flow trace and provide the execution history.")
51};
52
afedecd3 53/* The target_ops of record-btrace. */
f6ac5f3d
PA
54
55class record_btrace_target final : public target_ops
56{
57public:
d9f719f1
PA
58 const target_info &info () const override
59 { return record_btrace_target_info; }
f6ac5f3d 60
66b4deae
PA
61 strata stratum () const override { return record_stratum; }
62
f6ac5f3d
PA
63 void close () override;
64 void async (int) override;
65
66 void detach (inferior *inf, int from_tty) override
67 { record_detach (this, inf, from_tty); }
68
69 void disconnect (const char *, int) override;
70
71 void mourn_inferior () override
72 { record_mourn_inferior (this); }
73
74 void kill () override
75 { record_kill (this); }
76
77 enum record_method record_method (ptid_t ptid) override;
78
79 void stop_recording () override;
80 void info_record () override;
81
82 void insn_history (int size, gdb_disassembly_flags flags) override;
83 void insn_history_from (ULONGEST from, int size,
84 gdb_disassembly_flags flags) override;
85 void insn_history_range (ULONGEST begin, ULONGEST end,
86 gdb_disassembly_flags flags) override;
87 void call_history (int size, record_print_flags flags) override;
88 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
89 override;
90 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
91 override;
92
57810aa7
PA
93 bool record_is_replaying (ptid_t ptid) override;
94 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
95 void record_stop_replaying () override;
96
97 enum target_xfer_status xfer_partial (enum target_object object,
98 const char *annex,
99 gdb_byte *readbuf,
100 const gdb_byte *writebuf,
101 ULONGEST offset, ULONGEST len,
102 ULONGEST *xfered_len) override;
103
104 int insert_breakpoint (struct gdbarch *,
105 struct bp_target_info *) override;
106 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
107 enum remove_bp_reason) override;
108
109 void fetch_registers (struct regcache *, int) override;
110
111 void store_registers (struct regcache *, int) override;
112 void prepare_to_store (struct regcache *) override;
113
114 const struct frame_unwind *get_unwinder () override;
115
116 const struct frame_unwind *get_tailcall_unwinder () override;
117
118 void commit_resume () override;
119 void resume (ptid_t, int, enum gdb_signal) override;
120 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
121
122 void stop (ptid_t) override;
123 void update_thread_list () override;
57810aa7 124 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
125 void goto_record_begin () override;
126 void goto_record_end () override;
127 void goto_record (ULONGEST insn) override;
128
57810aa7 129 bool can_execute_reverse () override;
f6ac5f3d 130
57810aa7
PA
131 bool stopped_by_sw_breakpoint () override;
132 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 133
57810aa7
PA
134 bool stopped_by_hw_breakpoint () override;
135 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
136
137 enum exec_direction_kind execution_direction () override;
138 void prepare_to_generate_core () override;
139 void done_generating_core () override;
140};
141
142static record_btrace_target record_btrace_ops;
143
144/* Initialize the record-btrace target ops. */
afedecd3 145
76727919
TT
146/* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
3dcfdc58 148static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 149
67b5c0c1
MM
150/* Memory access types used in set/show record btrace replay-memory-access. */
151static const char replay_memory_access_read_only[] = "read-only";
152static const char replay_memory_access_read_write[] = "read-write";
153static const char *const replay_memory_access_types[] =
154{
155 replay_memory_access_read_only,
156 replay_memory_access_read_write,
157 NULL
158};
159
160/* The currently allowed replay memory access type. */
161static const char *replay_memory_access = replay_memory_access_read_only;
162
4a4495d6
MM
163/* The cpu state kinds. */
164enum record_btrace_cpu_state_kind
165{
166 CS_AUTO,
167 CS_NONE,
168 CS_CPU
169};
170
171/* The current cpu state. */
172static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
173
174/* The current cpu for trace decode. */
175static struct btrace_cpu record_btrace_cpu;
176
67b5c0c1
MM
177/* Command lists for "set/show record btrace". */
178static struct cmd_list_element *set_record_btrace_cmdlist;
179static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 180
70ad5bff
MM
181/* The execution direction of the last resume we got. See record-full.c. */
182static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
183
184/* The async event handler for reverse/replay execution. */
185static struct async_event_handler *record_btrace_async_inferior_event_handler;
186
aef92902
MM
187/* A flag indicating that we are currently generating a core file. */
188static int record_btrace_generating_corefile;
189
f4abbc16
MM
190/* The current branch trace configuration. */
191static struct btrace_config record_btrace_conf;
192
193/* Command list for "record btrace". */
194static struct cmd_list_element *record_btrace_cmdlist;
195
d33501a5
MM
196/* Command lists for "set/show record btrace bts". */
197static struct cmd_list_element *set_record_btrace_bts_cmdlist;
198static struct cmd_list_element *show_record_btrace_bts_cmdlist;
199
b20a6524
MM
200/* Command lists for "set/show record btrace pt". */
201static struct cmd_list_element *set_record_btrace_pt_cmdlist;
202static struct cmd_list_element *show_record_btrace_pt_cmdlist;
203
4a4495d6
MM
204/* Command list for "set record btrace cpu". */
205static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206
afedecd3
MM
207/* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
209
210#define DEBUG(msg, args...) \
211 do \
212 { \
213 if (record_debug != 0) \
214 fprintf_unfiltered (gdb_stdlog, \
215 "[record-btrace] " msg "\n", ##args); \
216 } \
217 while (0)
218
219
4a4495d6
MM
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
223record_btrace_get_cpu (void)
224{
225 switch (record_btrace_cpu_state)
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
afedecd3 240/* Update the branch trace for the current thread and return a pointer to its
066ce621 241 thread_info.
afedecd3
MM
242
243 Throws an error if there is no thread or no trace. This function never
244 returns NULL. */
245
066ce621
MM
246static struct thread_info *
247require_btrace_thread (void)
afedecd3 248{
afedecd3
MM
249 DEBUG ("require");
250
00431a78 251 if (inferior_ptid == null_ptid)
afedecd3
MM
252 error (_("No thread."));
253
00431a78
PA
254 thread_info *tp = inferior_thread ();
255
cd4007e4
MM
256 validate_registers_access ();
257
4a4495d6 258 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 259
6e07b1d2 260 if (btrace_is_empty (tp))
afedecd3
MM
261 error (_("No trace."));
262
066ce621
MM
263 return tp;
264}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
282/* Enable branch tracing for one thread. Warn on errors. */
283
284static void
285record_btrace_enable_warn (struct thread_info *tp)
286{
a70b8144 287 try
492d29ea
PA
288 {
289 btrace_enable (tp, &record_btrace_conf);
290 }
230d2906 291 catch (const gdb_exception_error &error)
492d29ea 292 {
3d6e9d23 293 warning ("%s", error.what ());
492d29ea 294 }
afedecd3
MM
295}
296
afedecd3
MM
297/* Enable automatic tracing of new threads. */
298
299static void
300record_btrace_auto_enable (void)
301{
302 DEBUG ("attach thread observer");
303
76727919
TT
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
afedecd3
MM
306}
307
308/* Disable automatic tracing of new threads. */
309
310static void
311record_btrace_auto_disable (void)
312{
afedecd3
MM
313 DEBUG ("detach thread observer");
314
76727919 315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
316}
317
70ad5bff
MM
318/* The record-btrace async event handler function. */
319
320static void
321record_btrace_handle_async_inferior_event (gdb_client_data data)
322{
323 inferior_event_handler (INF_REG_EVENT, NULL);
324}
325
c0272db5
TW
326/* See record-btrace.h. */
327
328void
329record_btrace_push_target (void)
330{
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
76727919 343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
344}
345
228f1508
SM
346/* Disable btrace on a set of threads on scope exit. */
347
348struct scoped_btrace_disable
349{
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370private:
371 std::forward_list<thread_info *> m_threads;
372};
373
d9f719f1 374/* Open target record-btrace. */
afedecd3 375
d9f719f1
PA
376static void
377record_btrace_target_open (const char *args, int from_tty)
afedecd3 378{
228f1508
SM
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
afedecd3
MM
382
383 DEBUG ("open");
384
8213266a 385 record_preopen ();
afedecd3
MM
386
387 if (!target_has_execution)
388 error (_("The program is not being run."));
389
08036331 390 for (thread_info *tp : all_non_exited_threads ())
5d5658a1 391 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 392 {
f4abbc16 393 btrace_enable (tp, &record_btrace_conf);
afedecd3 394
228f1508 395 btrace_disable.add_thread (tp);
afedecd3
MM
396 }
397
c0272db5 398 record_btrace_push_target ();
afedecd3 399
228f1508 400 btrace_disable.discard ();
afedecd3
MM
401}
402
f6ac5f3d 403/* The stop_recording method of target record-btrace. */
afedecd3 404
f6ac5f3d
PA
405void
406record_btrace_target::stop_recording ()
afedecd3 407{
afedecd3
MM
408 DEBUG ("stop recording");
409
410 record_btrace_auto_disable ();
411
08036331 412 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
413 if (tp->btrace.target != NULL)
414 btrace_disable (tp);
415}
416
f6ac5f3d 417/* The disconnect method of target record-btrace. */
c0272db5 418
f6ac5f3d
PA
419void
420record_btrace_target::disconnect (const char *args,
421 int from_tty)
c0272db5 422{
b6a8c27b 423 struct target_ops *beneath = this->beneath ();
c0272db5
TW
424
425 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 426 unpush_target (this);
c0272db5
TW
427
428 /* Forward disconnect. */
f6ac5f3d 429 beneath->disconnect (args, from_tty);
c0272db5
TW
430}
431
f6ac5f3d 432/* The close method of target record-btrace. */
afedecd3 433
f6ac5f3d
PA
434void
435record_btrace_target::close ()
afedecd3 436{
70ad5bff
MM
437 if (record_btrace_async_inferior_event_handler != NULL)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
439
99c819ee
MM
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
443
568e808b
MM
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
08036331 446 for (thread_info *tp : all_non_exited_threads ())
568e808b 447 btrace_teardown (tp);
afedecd3
MM
448}
449
f6ac5f3d 450/* The async method of target record-btrace. */
b7d2e916 451
f6ac5f3d
PA
452void
453record_btrace_target::async (int enable)
b7d2e916 454{
6a3753b3 455 if (enable)
b7d2e916
PA
456 mark_async_event_handler (record_btrace_async_inferior_event_handler);
457 else
458 clear_async_event_handler (record_btrace_async_inferior_event_handler);
459
b6a8c27b 460 this->beneath ()->async (enable);
b7d2e916
PA
461}
462
d33501a5
MM
463/* Adjusts the size and returns a human readable size suffix. */
464
465static const char *
466record_btrace_adjust_size (unsigned int *size)
467{
468 unsigned int sz;
469
470 sz = *size;
471
472 if ((sz & ((1u << 30) - 1)) == 0)
473 {
474 *size = sz >> 30;
475 return "GB";
476 }
477 else if ((sz & ((1u << 20) - 1)) == 0)
478 {
479 *size = sz >> 20;
480 return "MB";
481 }
482 else if ((sz & ((1u << 10) - 1)) == 0)
483 {
484 *size = sz >> 10;
485 return "kB";
486 }
487 else
488 return "";
489}
490
491/* Print a BTS configuration. */
492
493static void
494record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
495{
496 const char *suffix;
497 unsigned int size;
498
499 size = conf->size;
500 if (size > 0)
501 {
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
504 }
505}
506
bc504a31 507/* Print an Intel Processor Trace configuration. */
b20a6524
MM
508
509static void
510record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
511{
512 const char *suffix;
513 unsigned int size;
514
515 size = conf->size;
516 if (size > 0)
517 {
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
520 }
521}
522
d33501a5
MM
523/* Print a branch tracing configuration. */
524
525static void
526record_btrace_print_conf (const struct btrace_config *conf)
527{
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf->format));
530
531 switch (conf->format)
532 {
533 case BTRACE_FORMAT_NONE:
534 return;
535
536 case BTRACE_FORMAT_BTS:
537 record_btrace_print_bts_conf (&conf->bts);
538 return;
b20a6524
MM
539
540 case BTRACE_FORMAT_PT:
541 record_btrace_print_pt_conf (&conf->pt);
542 return;
d33501a5
MM
543 }
544
545 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
546}
547
f6ac5f3d 548/* The info_record method of target record-btrace. */
afedecd3 549
f6ac5f3d
PA
550void
551record_btrace_target::info_record ()
afedecd3
MM
552{
553 struct btrace_thread_info *btinfo;
f4abbc16 554 const struct btrace_config *conf;
afedecd3 555 struct thread_info *tp;
31fd9caa 556 unsigned int insns, calls, gaps;
afedecd3
MM
557
558 DEBUG ("info");
559
560 tp = find_thread_ptid (inferior_ptid);
561 if (tp == NULL)
562 error (_("No thread."));
563
cd4007e4
MM
564 validate_registers_access ();
565
f4abbc16
MM
566 btinfo = &tp->btrace;
567
f6ac5f3d 568 conf = ::btrace_conf (btinfo);
f4abbc16 569 if (conf != NULL)
d33501a5 570 record_btrace_print_conf (conf);
f4abbc16 571
4a4495d6 572 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 573
23a7fe75
MM
574 insns = 0;
575 calls = 0;
31fd9caa 576 gaps = 0;
23a7fe75 577
6e07b1d2 578 if (!btrace_is_empty (tp))
23a7fe75
MM
579 {
580 struct btrace_call_iterator call;
581 struct btrace_insn_iterator insn;
582
583 btrace_call_end (&call, btinfo);
584 btrace_call_prev (&call, 1);
5de9129b 585 calls = btrace_call_number (&call);
23a7fe75
MM
586
587 btrace_insn_end (&insn, btinfo);
5de9129b 588 insns = btrace_insn_number (&insn);
31fd9caa 589
69090cee
TW
590 /* If the last instruction is not a gap, it is the current instruction
591 that is not actually part of the record. */
592 if (btrace_insn_get (&insn) != NULL)
593 insns -= 1;
31fd9caa
MM
594
595 gaps = btinfo->ngaps;
23a7fe75 596 }
afedecd3 597
31fd9caa 598 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0 599 "for thread %s (%s).\n"), insns, calls, gaps,
a068643d
TT
600 print_thread_id (tp),
601 target_pid_to_str (tp->ptid).c_str ());
07bbe694
MM
602
603 if (btrace_is_replaying (tp))
604 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
605 btrace_insn_number (btinfo->replay));
afedecd3
MM
606}
607
31fd9caa
MM
608/* Print a decode error. */
609
610static void
611btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
612 enum btrace_format format)
613{
508352a9 614 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 615
112e8700 616 uiout->text (_("["));
508352a9
TW
617 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
618 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 619 {
112e8700 620 uiout->text (_("decode error ("));
381befee 621 uiout->field_signed ("errcode", errcode);
112e8700 622 uiout->text (_("): "));
31fd9caa 623 }
112e8700
SM
624 uiout->text (errstr);
625 uiout->text (_("]\n"));
31fd9caa
MM
626}
627
f94cc897
MM
628/* A range of source lines. */
629
630struct btrace_line_range
631{
632 /* The symtab this line is from. */
633 struct symtab *symtab;
634
635 /* The first line (inclusive). */
636 int begin;
637
638 /* The last line (exclusive). */
639 int end;
640};
641
642/* Construct a line range. */
643
644static struct btrace_line_range
645btrace_mk_line_range (struct symtab *symtab, int begin, int end)
646{
647 struct btrace_line_range range;
648
649 range.symtab = symtab;
650 range.begin = begin;
651 range.end = end;
652
653 return range;
654}
655
656/* Add a line to a line range. */
657
658static struct btrace_line_range
659btrace_line_range_add (struct btrace_line_range range, int line)
660{
661 if (range.end <= range.begin)
662 {
663 /* This is the first entry. */
664 range.begin = line;
665 range.end = line + 1;
666 }
667 else if (line < range.begin)
668 range.begin = line;
669 else if (range.end < line)
670 range.end = line;
671
672 return range;
673}
674
675/* Return non-zero if RANGE is empty, zero otherwise. */
676
677static int
678btrace_line_range_is_empty (struct btrace_line_range range)
679{
680 return range.end <= range.begin;
681}
682
683/* Return non-zero if LHS contains RHS, zero otherwise. */
684
685static int
686btrace_line_range_contains_range (struct btrace_line_range lhs,
687 struct btrace_line_range rhs)
688{
689 return ((lhs.symtab == rhs.symtab)
690 && (lhs.begin <= rhs.begin)
691 && (rhs.end <= lhs.end));
692}
693
694/* Find the line range associated with PC. */
695
696static struct btrace_line_range
697btrace_find_line_range (CORE_ADDR pc)
698{
699 struct btrace_line_range range;
700 struct linetable_entry *lines;
701 struct linetable *ltable;
702 struct symtab *symtab;
703 int nlines, i;
704
705 symtab = find_pc_line_symtab (pc);
706 if (symtab == NULL)
707 return btrace_mk_line_range (NULL, 0, 0);
708
709 ltable = SYMTAB_LINETABLE (symtab);
710 if (ltable == NULL)
711 return btrace_mk_line_range (symtab, 0, 0);
712
713 nlines = ltable->nitems;
714 lines = ltable->item;
715 if (nlines <= 0)
716 return btrace_mk_line_range (symtab, 0, 0);
717
718 range = btrace_mk_line_range (symtab, 0, 0);
719 for (i = 0; i < nlines - 1; i++)
720 {
721 if ((lines[i].pc == pc) && (lines[i].line != 0))
722 range = btrace_line_range_add (range, lines[i].line);
723 }
724
725 return range;
726}
727
728/* Print source lines in LINES to UIOUT.
729
730 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
731 instructions corresponding to that source line. When printing a new source
732 line, we do the cleanups for the open chain and open a new cleanup chain for
733 the new source line. If the source line range in LINES is not empty, this
734 function will leave the cleanup chain for the last printed source line open
735 so instructions can be added to it. */
736
737static void
738btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
739 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
740 gdb::optional<ui_out_emit_list> *asm_list,
741 gdb_disassembly_flags flags)
f94cc897 742{
8d297bbf 743 print_source_lines_flags psl_flags;
f94cc897 744
f94cc897
MM
745 if (flags & DISASSEMBLY_FILENAME)
746 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
747
7ea78b59 748 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 749 {
7ea78b59 750 asm_list->reset ();
f94cc897 751
7ea78b59 752 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
753
754 print_source_lines (lines.symtab, line, line + 1, psl_flags);
755
7ea78b59 756 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
757 }
758}
759
afedecd3
MM
760/* Disassemble a section of the recorded instruction trace. */
761
762static void
23a7fe75 763btrace_insn_history (struct ui_out *uiout,
31fd9caa 764 const struct btrace_thread_info *btinfo,
23a7fe75 765 const struct btrace_insn_iterator *begin,
9a24775b
PA
766 const struct btrace_insn_iterator *end,
767 gdb_disassembly_flags flags)
afedecd3 768{
9a24775b
PA
769 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
770 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 771
f94cc897
MM
772 flags |= DISASSEMBLY_SPECULATIVE;
773
7ea78b59
SM
774 struct gdbarch *gdbarch = target_gdbarch ();
775 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 776
7ea78b59 777 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 778
7ea78b59
SM
779 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
780 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 781
046bebe1 782 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
8b172ce7 783
7ea78b59
SM
784 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
785 btrace_insn_next (&it, 1))
afedecd3 786 {
23a7fe75
MM
787 const struct btrace_insn *insn;
788
789 insn = btrace_insn_get (&it);
790
31fd9caa
MM
791 /* A NULL instruction indicates a gap in the trace. */
792 if (insn == NULL)
793 {
794 const struct btrace_config *conf;
795
796 conf = btrace_conf (btinfo);
afedecd3 797
31fd9caa
MM
798 /* We have trace so we must have a configuration. */
799 gdb_assert (conf != NULL);
800
69090cee
TW
801 uiout->field_fmt ("insn-number", "%u",
802 btrace_insn_number (&it));
803 uiout->text ("\t");
804
805 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
806 conf->format);
807 }
808 else
809 {
f94cc897 810 struct disasm_insn dinsn;
da8c46d2 811
f94cc897 812 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 813 {
f94cc897
MM
814 struct btrace_line_range lines;
815
816 lines = btrace_find_line_range (insn->pc);
817 if (!btrace_line_range_is_empty (lines)
818 && !btrace_line_range_contains_range (last_lines, lines))
819 {
7ea78b59
SM
820 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
821 flags);
f94cc897
MM
822 last_lines = lines;
823 }
7ea78b59 824 else if (!src_and_asm_tuple.has_value ())
f94cc897 825 {
7ea78b59
SM
826 gdb_assert (!asm_list.has_value ());
827
828 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
829
f94cc897 830 /* No source information. */
7ea78b59 831 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
832 }
833
7ea78b59
SM
834 gdb_assert (src_and_asm_tuple.has_value ());
835 gdb_assert (asm_list.has_value ());
da8c46d2 836 }
da8c46d2 837
f94cc897
MM
838 memset (&dinsn, 0, sizeof (dinsn));
839 dinsn.number = btrace_insn_number (&it);
840 dinsn.addr = insn->pc;
31fd9caa 841
da8c46d2 842 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 843 dinsn.is_speculative = 1;
da8c46d2 844
046bebe1 845 disasm.pretty_print_insn (&dinsn, flags);
31fd9caa 846 }
afedecd3
MM
847 }
848}
849
f6ac5f3d 850/* The insn_history method of target record-btrace. */
afedecd3 851
f6ac5f3d
PA
852void
853record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
854{
855 struct btrace_thread_info *btinfo;
23a7fe75
MM
856 struct btrace_insn_history *history;
857 struct btrace_insn_iterator begin, end;
afedecd3 858 struct ui_out *uiout;
23a7fe75 859 unsigned int context, covered;
afedecd3
MM
860
861 uiout = current_uiout;
2e783024 862 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 863 context = abs (size);
afedecd3
MM
864 if (context == 0)
865 error (_("Bad record instruction-history-size."));
866
23a7fe75
MM
867 btinfo = require_btrace ();
868 history = btinfo->insn_history;
869 if (history == NULL)
afedecd3 870 {
07bbe694 871 struct btrace_insn_iterator *replay;
afedecd3 872
9a24775b 873 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 874
07bbe694
MM
875 /* If we're replaying, we start at the replay position. Otherwise, we
876 start at the tail of the trace. */
877 replay = btinfo->replay;
878 if (replay != NULL)
879 begin = *replay;
880 else
881 btrace_insn_end (&begin, btinfo);
882
883 /* We start from here and expand in the requested direction. Then we
884 expand in the other direction, as well, to fill up any remaining
885 context. */
886 end = begin;
887 if (size < 0)
888 {
889 /* We want the current position covered, as well. */
890 covered = btrace_insn_next (&end, 1);
891 covered += btrace_insn_prev (&begin, context - covered);
892 covered += btrace_insn_next (&end, context - covered);
893 }
894 else
895 {
896 covered = btrace_insn_next (&end, context);
897 covered += btrace_insn_prev (&begin, context - covered);
898 }
afedecd3
MM
899 }
900 else
901 {
23a7fe75
MM
902 begin = history->begin;
903 end = history->end;
afedecd3 904
9a24775b 905 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 906 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 907
23a7fe75
MM
908 if (size < 0)
909 {
910 end = begin;
911 covered = btrace_insn_prev (&begin, context);
912 }
913 else
914 {
915 begin = end;
916 covered = btrace_insn_next (&end, context);
917 }
afedecd3
MM
918 }
919
23a7fe75 920 if (covered > 0)
31fd9caa 921 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
922 else
923 {
924 if (size < 0)
925 printf_unfiltered (_("At the start of the branch trace record.\n"));
926 else
927 printf_unfiltered (_("At the end of the branch trace record.\n"));
928 }
afedecd3 929
23a7fe75 930 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
931}
932
f6ac5f3d 933/* The insn_history_range method of target record-btrace. */
afedecd3 934
f6ac5f3d
PA
935void
936record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
937 gdb_disassembly_flags flags)
afedecd3
MM
938{
939 struct btrace_thread_info *btinfo;
23a7fe75 940 struct btrace_insn_iterator begin, end;
afedecd3 941 struct ui_out *uiout;
23a7fe75
MM
942 unsigned int low, high;
943 int found;
afedecd3
MM
944
945 uiout = current_uiout;
2e783024 946 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
947 low = from;
948 high = to;
afedecd3 949
9a24775b 950 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
951
952 /* Check for wrap-arounds. */
23a7fe75 953 if (low != from || high != to)
afedecd3
MM
954 error (_("Bad range."));
955
0688d04e 956 if (high < low)
afedecd3
MM
957 error (_("Bad range."));
958
23a7fe75 959 btinfo = require_btrace ();
afedecd3 960
23a7fe75
MM
961 found = btrace_find_insn_by_number (&begin, btinfo, low);
962 if (found == 0)
963 error (_("Range out of bounds."));
afedecd3 964
23a7fe75
MM
965 found = btrace_find_insn_by_number (&end, btinfo, high);
966 if (found == 0)
0688d04e
MM
967 {
968 /* Silently truncate the range. */
969 btrace_insn_end (&end, btinfo);
970 }
971 else
972 {
973 /* We want both begin and end to be inclusive. */
974 btrace_insn_next (&end, 1);
975 }
afedecd3 976
31fd9caa 977 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 978 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
979}
980
f6ac5f3d 981/* The insn_history_from method of target record-btrace. */
afedecd3 982
f6ac5f3d
PA
983void
984record_btrace_target::insn_history_from (ULONGEST from, int size,
985 gdb_disassembly_flags flags)
afedecd3
MM
986{
987 ULONGEST begin, end, context;
988
989 context = abs (size);
0688d04e
MM
990 if (context == 0)
991 error (_("Bad record instruction-history-size."));
afedecd3
MM
992
993 if (size < 0)
994 {
995 end = from;
996
997 if (from < context)
998 begin = 0;
999 else
0688d04e 1000 begin = from - context + 1;
afedecd3
MM
1001 }
1002 else
1003 {
1004 begin = from;
0688d04e 1005 end = from + context - 1;
afedecd3
MM
1006
1007 /* Check for wrap-around. */
1008 if (end < begin)
1009 end = ULONGEST_MAX;
1010 }
1011
f6ac5f3d 1012 insn_history_range (begin, end, flags);
afedecd3
MM
1013}
1014
1015/* Print the instruction number range for a function call history line. */
1016
1017static void
23a7fe75
MM
1018btrace_call_history_insn_range (struct ui_out *uiout,
1019 const struct btrace_function *bfun)
afedecd3 1020{
7acbe133
MM
1021 unsigned int begin, end, size;
1022
0860c437 1023 size = bfun->insn.size ();
7acbe133 1024 gdb_assert (size > 0);
afedecd3 1025
23a7fe75 1026 begin = bfun->insn_offset;
7acbe133 1027 end = begin + size - 1;
afedecd3 1028
1f77b012 1029 uiout->field_unsigned ("insn begin", begin);
112e8700 1030 uiout->text (",");
1f77b012 1031 uiout->field_unsigned ("insn end", end);
afedecd3
MM
1032}
1033
ce0dfbea
MM
1034/* Compute the lowest and highest source line for the instructions in BFUN
1035 and return them in PBEGIN and PEND.
1036 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1037 result from inlining or macro expansion. */
1038
1039static void
1040btrace_compute_src_line_range (const struct btrace_function *bfun,
1041 int *pbegin, int *pend)
1042{
ce0dfbea
MM
1043 struct symtab *symtab;
1044 struct symbol *sym;
ce0dfbea
MM
1045 int begin, end;
1046
1047 begin = INT_MAX;
1048 end = INT_MIN;
1049
1050 sym = bfun->sym;
1051 if (sym == NULL)
1052 goto out;
1053
1054 symtab = symbol_symtab (sym);
1055
0860c437 1056 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1057 {
1058 struct symtab_and_line sal;
1059
0860c437 1060 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1061 if (sal.symtab != symtab || sal.line == 0)
1062 continue;
1063
325fac50
PA
1064 begin = std::min (begin, sal.line);
1065 end = std::max (end, sal.line);
ce0dfbea
MM
1066 }
1067
1068 out:
1069 *pbegin = begin;
1070 *pend = end;
1071}
1072
afedecd3
MM
1073/* Print the source line information for a function call history line. */
1074
1075static void
23a7fe75
MM
1076btrace_call_history_src_line (struct ui_out *uiout,
1077 const struct btrace_function *bfun)
afedecd3
MM
1078{
1079 struct symbol *sym;
23a7fe75 1080 int begin, end;
afedecd3
MM
1081
1082 sym = bfun->sym;
1083 if (sym == NULL)
1084 return;
1085
112e8700 1086 uiout->field_string ("file",
cbe56571 1087 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1088 file_name_style.style ());
afedecd3 1089
ce0dfbea 1090 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1091 if (end < begin)
afedecd3
MM
1092 return;
1093
112e8700 1094 uiout->text (":");
381befee 1095 uiout->field_signed ("min line", begin);
afedecd3 1096
23a7fe75 1097 if (end == begin)
afedecd3
MM
1098 return;
1099
112e8700 1100 uiout->text (",");
381befee 1101 uiout->field_signed ("max line", end);
afedecd3
MM
1102}
1103
0b722aec
MM
1104/* Get the name of a branch trace function. */
1105
1106static const char *
1107btrace_get_bfun_name (const struct btrace_function *bfun)
1108{
1109 struct minimal_symbol *msym;
1110 struct symbol *sym;
1111
1112 if (bfun == NULL)
1113 return "??";
1114
1115 msym = bfun->msym;
1116 sym = bfun->sym;
1117
1118 if (sym != NULL)
1119 return SYMBOL_PRINT_NAME (sym);
1120 else if (msym != NULL)
efd66ac6 1121 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1122 else
1123 return "??";
1124}
1125
afedecd3
MM
1126/* Disassemble a section of the recorded function trace. */
1127
1128static void
23a7fe75 1129btrace_call_history (struct ui_out *uiout,
8710b709 1130 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1131 const struct btrace_call_iterator *begin,
1132 const struct btrace_call_iterator *end,
8d297bbf 1133 int int_flags)
afedecd3 1134{
23a7fe75 1135 struct btrace_call_iterator it;
8d297bbf 1136 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1137
8d297bbf 1138 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1139 btrace_call_number (end));
afedecd3 1140
23a7fe75 1141 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1142 {
23a7fe75
MM
1143 const struct btrace_function *bfun;
1144 struct minimal_symbol *msym;
1145 struct symbol *sym;
1146
1147 bfun = btrace_call_get (&it);
23a7fe75 1148 sym = bfun->sym;
0b722aec 1149 msym = bfun->msym;
23a7fe75 1150
afedecd3 1151 /* Print the function index. */
1f77b012 1152 uiout->field_unsigned ("index", bfun->number);
112e8700 1153 uiout->text ("\t");
afedecd3 1154
31fd9caa
MM
1155 /* Indicate gaps in the trace. */
1156 if (bfun->errcode != 0)
1157 {
1158 const struct btrace_config *conf;
1159
1160 conf = btrace_conf (btinfo);
1161
1162 /* We have trace so we must have a configuration. */
1163 gdb_assert (conf != NULL);
1164
1165 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1166
1167 continue;
1168 }
1169
8710b709
MM
1170 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1171 {
1172 int level = bfun->level + btinfo->level, i;
1173
1174 for (i = 0; i < level; ++i)
112e8700 1175 uiout->text (" ");
8710b709
MM
1176 }
1177
1178 if (sym != NULL)
cbe56571 1179 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
e43b10e1 1180 function_name_style.style ());
8710b709 1181 else if (msym != NULL)
cbe56571 1182 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
e43b10e1 1183 function_name_style.style ());
112e8700 1184 else if (!uiout->is_mi_like_p ())
cbe56571 1185 uiout->field_string ("function", "??",
e43b10e1 1186 function_name_style.style ());
8710b709 1187
1e038f67 1188 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1189 {
112e8700 1190 uiout->text (_("\tinst "));
23a7fe75 1191 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1192 }
1193
1e038f67 1194 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1195 {
112e8700 1196 uiout->text (_("\tat "));
23a7fe75 1197 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1198 }
1199
112e8700 1200 uiout->text ("\n");
afedecd3
MM
1201 }
1202}
1203
f6ac5f3d 1204/* The call_history method of target record-btrace. */
afedecd3 1205
f6ac5f3d
PA
1206void
1207record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1208{
1209 struct btrace_thread_info *btinfo;
23a7fe75
MM
1210 struct btrace_call_history *history;
1211 struct btrace_call_iterator begin, end;
afedecd3 1212 struct ui_out *uiout;
23a7fe75 1213 unsigned int context, covered;
afedecd3
MM
1214
1215 uiout = current_uiout;
2e783024 1216 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1217 context = abs (size);
afedecd3
MM
1218 if (context == 0)
1219 error (_("Bad record function-call-history-size."));
1220
23a7fe75
MM
1221 btinfo = require_btrace ();
1222 history = btinfo->call_history;
1223 if (history == NULL)
afedecd3 1224 {
07bbe694 1225 struct btrace_insn_iterator *replay;
afedecd3 1226
0cb7c7b0 1227 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1228
07bbe694
MM
1229 /* If we're replaying, we start at the replay position. Otherwise, we
1230 start at the tail of the trace. */
1231 replay = btinfo->replay;
1232 if (replay != NULL)
1233 {
07bbe694 1234 begin.btinfo = btinfo;
a0f1b963 1235 begin.index = replay->call_index;
07bbe694
MM
1236 }
1237 else
1238 btrace_call_end (&begin, btinfo);
1239
1240 /* We start from here and expand in the requested direction. Then we
1241 expand in the other direction, as well, to fill up any remaining
1242 context. */
1243 end = begin;
1244 if (size < 0)
1245 {
1246 /* We want the current position covered, as well. */
1247 covered = btrace_call_next (&end, 1);
1248 covered += btrace_call_prev (&begin, context - covered);
1249 covered += btrace_call_next (&end, context - covered);
1250 }
1251 else
1252 {
1253 covered = btrace_call_next (&end, context);
1254 covered += btrace_call_prev (&begin, context- covered);
1255 }
afedecd3
MM
1256 }
1257 else
1258 {
23a7fe75
MM
1259 begin = history->begin;
1260 end = history->end;
afedecd3 1261
0cb7c7b0 1262 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1263 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1264
23a7fe75
MM
1265 if (size < 0)
1266 {
1267 end = begin;
1268 covered = btrace_call_prev (&begin, context);
1269 }
1270 else
1271 {
1272 begin = end;
1273 covered = btrace_call_next (&end, context);
1274 }
afedecd3
MM
1275 }
1276
23a7fe75 1277 if (covered > 0)
8710b709 1278 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1279 else
1280 {
1281 if (size < 0)
1282 printf_unfiltered (_("At the start of the branch trace record.\n"));
1283 else
1284 printf_unfiltered (_("At the end of the branch trace record.\n"));
1285 }
afedecd3 1286
23a7fe75 1287 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1288}
1289
f6ac5f3d 1290/* The call_history_range method of target record-btrace. */
afedecd3 1291
f6ac5f3d
PA
1292void
1293record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1294 record_print_flags flags)
afedecd3
MM
1295{
1296 struct btrace_thread_info *btinfo;
23a7fe75 1297 struct btrace_call_iterator begin, end;
afedecd3 1298 struct ui_out *uiout;
23a7fe75
MM
1299 unsigned int low, high;
1300 int found;
afedecd3
MM
1301
1302 uiout = current_uiout;
2e783024 1303 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1304 low = from;
1305 high = to;
afedecd3 1306
0cb7c7b0 1307 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1308
1309 /* Check for wrap-arounds. */
23a7fe75 1310 if (low != from || high != to)
afedecd3
MM
1311 error (_("Bad range."));
1312
0688d04e 1313 if (high < low)
afedecd3
MM
1314 error (_("Bad range."));
1315
23a7fe75 1316 btinfo = require_btrace ();
afedecd3 1317
23a7fe75
MM
1318 found = btrace_find_call_by_number (&begin, btinfo, low);
1319 if (found == 0)
1320 error (_("Range out of bounds."));
afedecd3 1321
23a7fe75
MM
1322 found = btrace_find_call_by_number (&end, btinfo, high);
1323 if (found == 0)
0688d04e
MM
1324 {
1325 /* Silently truncate the range. */
1326 btrace_call_end (&end, btinfo);
1327 }
1328 else
1329 {
1330 /* We want both begin and end to be inclusive. */
1331 btrace_call_next (&end, 1);
1332 }
afedecd3 1333
8710b709 1334 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1335 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1336}
1337
f6ac5f3d 1338/* The call_history_from method of target record-btrace. */
afedecd3 1339
f6ac5f3d
PA
1340void
1341record_btrace_target::call_history_from (ULONGEST from, int size,
1342 record_print_flags flags)
afedecd3
MM
1343{
1344 ULONGEST begin, end, context;
1345
1346 context = abs (size);
0688d04e
MM
1347 if (context == 0)
1348 error (_("Bad record function-call-history-size."));
afedecd3
MM
1349
1350 if (size < 0)
1351 {
1352 end = from;
1353
1354 if (from < context)
1355 begin = 0;
1356 else
0688d04e 1357 begin = from - context + 1;
afedecd3
MM
1358 }
1359 else
1360 {
1361 begin = from;
0688d04e 1362 end = from + context - 1;
afedecd3
MM
1363
1364 /* Check for wrap-around. */
1365 if (end < begin)
1366 end = ULONGEST_MAX;
1367 }
1368
f6ac5f3d 1369 call_history_range ( begin, end, flags);
afedecd3
MM
1370}
1371
f6ac5f3d 1372/* The record_method method of target record-btrace. */
b158a20f 1373
f6ac5f3d
PA
1374enum record_method
1375record_btrace_target::record_method (ptid_t ptid)
b158a20f 1376{
b158a20f
TW
1377 struct thread_info * const tp = find_thread_ptid (ptid);
1378
1379 if (tp == NULL)
1380 error (_("No thread."));
1381
1382 if (tp->btrace.target == NULL)
1383 return RECORD_METHOD_NONE;
1384
1385 return RECORD_METHOD_BTRACE;
1386}
1387
f6ac5f3d 1388/* The record_is_replaying method of target record-btrace. */
07bbe694 1389
57810aa7 1390bool
f6ac5f3d 1391record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1392{
08036331
PA
1393 for (thread_info *tp : all_non_exited_threads (ptid))
1394 if (btrace_is_replaying (tp))
57810aa7 1395 return true;
07bbe694 1396
57810aa7 1397 return false;
07bbe694
MM
1398}
1399
f6ac5f3d 1400/* The record_will_replay method of target record-btrace. */
7ff27e9b 1401
57810aa7 1402bool
f6ac5f3d 1403record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1404{
f6ac5f3d 1405 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1406}
1407
f6ac5f3d 1408/* The xfer_partial method of target record-btrace. */
633785ff 1409
f6ac5f3d
PA
1410enum target_xfer_status
1411record_btrace_target::xfer_partial (enum target_object object,
1412 const char *annex, gdb_byte *readbuf,
1413 const gdb_byte *writebuf, ULONGEST offset,
1414 ULONGEST len, ULONGEST *xfered_len)
633785ff 1415{
633785ff 1416 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1417 if (replay_memory_access == replay_memory_access_read_only
aef92902 1418 && !record_btrace_generating_corefile
f6ac5f3d 1419 && record_is_replaying (inferior_ptid))
633785ff
MM
1420 {
1421 switch (object)
1422 {
1423 case TARGET_OBJECT_MEMORY:
1424 {
1425 struct target_section *section;
1426
1427 /* We do not allow writing memory in general. */
1428 if (writebuf != NULL)
9b409511
YQ
1429 {
1430 *xfered_len = len;
bc113b4e 1431 return TARGET_XFER_UNAVAILABLE;
9b409511 1432 }
633785ff
MM
1433
1434 /* We allow reading readonly memory. */
f6ac5f3d 1435 section = target_section_by_addr (this, offset);
633785ff
MM
1436 if (section != NULL)
1437 {
1438 /* Check if the section we found is readonly. */
fd361982 1439 if ((bfd_section_flags (section->the_bfd_section)
633785ff
MM
1440 & SEC_READONLY) != 0)
1441 {
1442 /* Truncate the request to fit into this section. */
325fac50 1443 len = std::min (len, section->endaddr - offset);
633785ff
MM
1444 break;
1445 }
1446 }
1447
9b409511 1448 *xfered_len = len;
bc113b4e 1449 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1450 }
1451 }
1452 }
1453
1454 /* Forward the request. */
b6a8c27b
PA
1455 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1456 offset, len, xfered_len);
633785ff
MM
1457}
1458
f6ac5f3d 1459/* The insert_breakpoint method of target record-btrace. */
633785ff 1460
f6ac5f3d
PA
1461int
1462record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1463 struct bp_target_info *bp_tgt)
633785ff 1464{
67b5c0c1
MM
1465 const char *old;
1466 int ret;
633785ff
MM
1467
1468 /* Inserting breakpoints requires accessing memory. Allow it for the
1469 duration of this function. */
67b5c0c1
MM
1470 old = replay_memory_access;
1471 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1472
1473 ret = 0;
a70b8144 1474 try
492d29ea 1475 {
b6a8c27b 1476 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1477 }
230d2906 1478 catch (const gdb_exception &except)
492d29ea 1479 {
6c63c96a 1480 replay_memory_access = old;
eedc3f4f 1481 throw;
492d29ea 1482 }
6c63c96a 1483 replay_memory_access = old;
633785ff
MM
1484
1485 return ret;
1486}
1487
f6ac5f3d 1488/* The remove_breakpoint method of target record-btrace. */
633785ff 1489
f6ac5f3d
PA
1490int
1491record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1492 struct bp_target_info *bp_tgt,
1493 enum remove_bp_reason reason)
633785ff 1494{
67b5c0c1
MM
1495 const char *old;
1496 int ret;
633785ff
MM
1497
1498 /* Removing breakpoints requires accessing memory. Allow it for the
1499 duration of this function. */
67b5c0c1
MM
1500 old = replay_memory_access;
1501 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1502
1503 ret = 0;
a70b8144 1504 try
492d29ea 1505 {
b6a8c27b 1506 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1507 }
230d2906 1508 catch (const gdb_exception &except)
492d29ea 1509 {
6c63c96a 1510 replay_memory_access = old;
eedc3f4f 1511 throw;
492d29ea 1512 }
6c63c96a 1513 replay_memory_access = old;
633785ff
MM
1514
1515 return ret;
1516}
1517
f6ac5f3d 1518/* The fetch_registers method of target record-btrace. */
1f3ef581 1519
f6ac5f3d
PA
1520void
1521record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1522{
1523 struct btrace_insn_iterator *replay;
1524 struct thread_info *tp;
1525
222312d3 1526 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1527 gdb_assert (tp != NULL);
1528
1529 replay = tp->btrace.replay;
aef92902 1530 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1531 {
1532 const struct btrace_insn *insn;
1533 struct gdbarch *gdbarch;
1534 int pcreg;
1535
ac7936df 1536 gdbarch = regcache->arch ();
1f3ef581
MM
1537 pcreg = gdbarch_pc_regnum (gdbarch);
1538 if (pcreg < 0)
1539 return;
1540
1541 /* We can only provide the PC register. */
1542 if (regno >= 0 && regno != pcreg)
1543 return;
1544
1545 insn = btrace_insn_get (replay);
1546 gdb_assert (insn != NULL);
1547
73e1c03f 1548 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1549 }
1550 else
b6a8c27b 1551 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1552}
1553
f6ac5f3d 1554/* The store_registers method of target record-btrace. */
1f3ef581 1555
f6ac5f3d
PA
1556void
1557record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1558{
a52eab48 1559 if (!record_btrace_generating_corefile
222312d3 1560 && record_is_replaying (regcache->ptid ()))
4d10e986 1561 error (_("Cannot write registers while replaying."));
1f3ef581 1562
491144b5 1563 gdb_assert (may_write_registers);
1f3ef581 1564
b6a8c27b 1565 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1566}
1567
f6ac5f3d 1568/* The prepare_to_store method of target record-btrace. */
1f3ef581 1569
f6ac5f3d
PA
1570void
1571record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1572{
a52eab48 1573 if (!record_btrace_generating_corefile
222312d3 1574 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1575 return;
1576
b6a8c27b 1577 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1578}
1579
0b722aec
MM
1580/* The branch trace frame cache. */
1581
1582struct btrace_frame_cache
1583{
1584 /* The thread. */
1585 struct thread_info *tp;
1586
1587 /* The frame info. */
1588 struct frame_info *frame;
1589
1590 /* The branch trace function segment. */
1591 const struct btrace_function *bfun;
1592};
1593
1594/* A struct btrace_frame_cache hash table indexed by NEXT. */
1595
1596static htab_t bfcache;
1597
1598/* hash_f for htab_create_alloc of bfcache. */
1599
1600static hashval_t
1601bfcache_hash (const void *arg)
1602{
19ba03f4
SM
1603 const struct btrace_frame_cache *cache
1604 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1605
1606 return htab_hash_pointer (cache->frame);
1607}
1608
1609/* eq_f for htab_create_alloc of bfcache. */
1610
1611static int
1612bfcache_eq (const void *arg1, const void *arg2)
1613{
19ba03f4
SM
1614 const struct btrace_frame_cache *cache1
1615 = (const struct btrace_frame_cache *) arg1;
1616 const struct btrace_frame_cache *cache2
1617 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1618
1619 return cache1->frame == cache2->frame;
1620}
1621
1622/* Create a new btrace frame cache. */
1623
1624static struct btrace_frame_cache *
1625bfcache_new (struct frame_info *frame)
1626{
1627 struct btrace_frame_cache *cache;
1628 void **slot;
1629
1630 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1631 cache->frame = frame;
1632
1633 slot = htab_find_slot (bfcache, cache, INSERT);
1634 gdb_assert (*slot == NULL);
1635 *slot = cache;
1636
1637 return cache;
1638}
1639
1640/* Extract the branch trace function from a branch trace frame. */
1641
1642static const struct btrace_function *
1643btrace_get_frame_function (struct frame_info *frame)
1644{
1645 const struct btrace_frame_cache *cache;
0b722aec
MM
1646 struct btrace_frame_cache pattern;
1647 void **slot;
1648
1649 pattern.frame = frame;
1650
1651 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1652 if (slot == NULL)
1653 return NULL;
1654
19ba03f4 1655 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1656 return cache->bfun;
1657}
1658
cecac1ab
MM
1659/* Implement stop_reason method for record_btrace_frame_unwind. */
1660
1661static enum unwind_stop_reason
1662record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1663 void **this_cache)
1664{
0b722aec
MM
1665 const struct btrace_frame_cache *cache;
1666 const struct btrace_function *bfun;
1667
19ba03f4 1668 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1669 bfun = cache->bfun;
1670 gdb_assert (bfun != NULL);
1671
42bfe59e 1672 if (bfun->up == 0)
0b722aec
MM
1673 return UNWIND_UNAVAILABLE;
1674
1675 return UNWIND_NO_REASON;
cecac1ab
MM
1676}
1677
1678/* Implement this_id method for record_btrace_frame_unwind. */
1679
1680static void
1681record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1682 struct frame_id *this_id)
1683{
0b722aec
MM
1684 const struct btrace_frame_cache *cache;
1685 const struct btrace_function *bfun;
4aeb0dfc 1686 struct btrace_call_iterator it;
0b722aec
MM
1687 CORE_ADDR code, special;
1688
19ba03f4 1689 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1690
1691 bfun = cache->bfun;
1692 gdb_assert (bfun != NULL);
1693
4aeb0dfc
TW
1694 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1695 bfun = btrace_call_get (&it);
0b722aec
MM
1696
1697 code = get_frame_func (this_frame);
1698 special = bfun->number;
1699
1700 *this_id = frame_id_build_unavailable_stack_special (code, special);
1701
1702 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1703 btrace_get_bfun_name (cache->bfun),
1704 core_addr_to_string_nz (this_id->code_addr),
1705 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1706}
1707
1708/* Implement prev_register method for record_btrace_frame_unwind. */
1709
1710static struct value *
1711record_btrace_frame_prev_register (struct frame_info *this_frame,
1712 void **this_cache,
1713 int regnum)
1714{
0b722aec
MM
1715 const struct btrace_frame_cache *cache;
1716 const struct btrace_function *bfun, *caller;
42bfe59e 1717 struct btrace_call_iterator it;
0b722aec
MM
1718 struct gdbarch *gdbarch;
1719 CORE_ADDR pc;
1720 int pcreg;
1721
1722 gdbarch = get_frame_arch (this_frame);
1723 pcreg = gdbarch_pc_regnum (gdbarch);
1724 if (pcreg < 0 || regnum != pcreg)
1725 throw_error (NOT_AVAILABLE_ERROR,
1726 _("Registers are not available in btrace record history"));
1727
19ba03f4 1728 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1729 bfun = cache->bfun;
1730 gdb_assert (bfun != NULL);
1731
42bfe59e 1732 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1733 throw_error (NOT_AVAILABLE_ERROR,
1734 _("No caller in btrace record history"));
1735
42bfe59e
TW
1736 caller = btrace_call_get (&it);
1737
0b722aec 1738 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1739 pc = caller->insn.front ().pc;
0b722aec
MM
1740 else
1741 {
0860c437 1742 pc = caller->insn.back ().pc;
0b722aec
MM
1743 pc += gdb_insn_length (gdbarch, pc);
1744 }
1745
1746 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1747 btrace_get_bfun_name (bfun), bfun->level,
1748 core_addr_to_string_nz (pc));
1749
1750 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1751}
1752
1753/* Implement sniffer method for record_btrace_frame_unwind. */
1754
1755static int
1756record_btrace_frame_sniffer (const struct frame_unwind *self,
1757 struct frame_info *this_frame,
1758 void **this_cache)
1759{
0b722aec
MM
1760 const struct btrace_function *bfun;
1761 struct btrace_frame_cache *cache;
cecac1ab 1762 struct thread_info *tp;
0b722aec 1763 struct frame_info *next;
cecac1ab
MM
1764
1765 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1766 tp = inferior_thread ();
cecac1ab 1767
0b722aec
MM
1768 bfun = NULL;
1769 next = get_next_frame (this_frame);
1770 if (next == NULL)
1771 {
1772 const struct btrace_insn_iterator *replay;
1773
1774 replay = tp->btrace.replay;
1775 if (replay != NULL)
08c3f6d2 1776 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1777 }
1778 else
1779 {
1780 const struct btrace_function *callee;
42bfe59e 1781 struct btrace_call_iterator it;
0b722aec
MM
1782
1783 callee = btrace_get_frame_function (next);
42bfe59e
TW
1784 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1785 return 0;
1786
1787 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1788 return 0;
1789
1790 bfun = btrace_call_get (&it);
0b722aec
MM
1791 }
1792
1793 if (bfun == NULL)
1794 return 0;
1795
1796 DEBUG ("[frame] sniffed frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1798
1799 /* This is our frame. Initialize the frame cache. */
1800 cache = bfcache_new (this_frame);
1801 cache->tp = tp;
1802 cache->bfun = bfun;
1803
1804 *this_cache = cache;
1805 return 1;
1806}
1807
1808/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1809
1810static int
1811record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1812 struct frame_info *this_frame,
1813 void **this_cache)
1814{
1815 const struct btrace_function *bfun, *callee;
1816 struct btrace_frame_cache *cache;
42bfe59e 1817 struct btrace_call_iterator it;
0b722aec 1818 struct frame_info *next;
42bfe59e 1819 struct thread_info *tinfo;
0b722aec
MM
1820
1821 next = get_next_frame (this_frame);
1822 if (next == NULL)
1823 return 0;
1824
1825 callee = btrace_get_frame_function (next);
1826 if (callee == NULL)
1827 return 0;
1828
1829 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1830 return 0;
1831
00431a78 1832 tinfo = inferior_thread ();
42bfe59e 1833 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1834 return 0;
1835
42bfe59e
TW
1836 bfun = btrace_call_get (&it);
1837
0b722aec
MM
1838 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1839 btrace_get_bfun_name (bfun), bfun->level);
1840
1841 /* This is our frame. Initialize the frame cache. */
1842 cache = bfcache_new (this_frame);
42bfe59e 1843 cache->tp = tinfo;
0b722aec
MM
1844 cache->bfun = bfun;
1845
1846 *this_cache = cache;
1847 return 1;
1848}
1849
1850static void
1851record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1852{
1853 struct btrace_frame_cache *cache;
1854 void **slot;
1855
19ba03f4 1856 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1857
1858 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1859 gdb_assert (slot != NULL);
1860
1861 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1862}
1863
1864/* btrace recording does not store previous memory content, neither the stack
1865 frames content. Any unwinding would return errorneous results as the stack
1866 contents no longer matches the changed PC value restored from history.
1867 Therefore this unwinder reports any possibly unwound registers as
1868 <unavailable>. */
1869
0b722aec 1870const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1871{
1872 NORMAL_FRAME,
1873 record_btrace_frame_unwind_stop_reason,
1874 record_btrace_frame_this_id,
1875 record_btrace_frame_prev_register,
1876 NULL,
0b722aec
MM
1877 record_btrace_frame_sniffer,
1878 record_btrace_frame_dealloc_cache
1879};
1880
1881const struct frame_unwind record_btrace_tailcall_frame_unwind =
1882{
1883 TAILCALL_FRAME,
1884 record_btrace_frame_unwind_stop_reason,
1885 record_btrace_frame_this_id,
1886 record_btrace_frame_prev_register,
1887 NULL,
1888 record_btrace_tailcall_frame_sniffer,
1889 record_btrace_frame_dealloc_cache
cecac1ab 1890};
b2f4cfde 1891
f6ac5f3d 1892/* Implement the get_unwinder method. */
ac01945b 1893
f6ac5f3d
PA
1894const struct frame_unwind *
1895record_btrace_target::get_unwinder ()
ac01945b
TT
1896{
1897 return &record_btrace_frame_unwind;
1898}
1899
f6ac5f3d 1900/* Implement the get_tailcall_unwinder method. */
ac01945b 1901
f6ac5f3d
PA
1902const struct frame_unwind *
1903record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1904{
1905 return &record_btrace_tailcall_frame_unwind;
1906}
1907
987e68b1
MM
1908/* Return a human-readable string for FLAG. */
1909
1910static const char *
1911btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1912{
1913 switch (flag)
1914 {
1915 case BTHR_STEP:
1916 return "step";
1917
1918 case BTHR_RSTEP:
1919 return "reverse-step";
1920
1921 case BTHR_CONT:
1922 return "cont";
1923
1924 case BTHR_RCONT:
1925 return "reverse-cont";
1926
1927 case BTHR_STOP:
1928 return "stop";
1929 }
1930
1931 return "<invalid>";
1932}
1933
52834460
MM
1934/* Indicate that TP should be resumed according to FLAG. */
1935
1936static void
1937record_btrace_resume_thread (struct thread_info *tp,
1938 enum btrace_thread_flag flag)
1939{
1940 struct btrace_thread_info *btinfo;
1941
43792cf0 1942 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1943 target_pid_to_str (tp->ptid).c_str (), flag,
1944 btrace_thread_flag_to_str (flag));
52834460
MM
1945
1946 btinfo = &tp->btrace;
1947
52834460 1948 /* Fetch the latest branch trace. */
4a4495d6 1949 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1950
0ca912df
MM
1951 /* A resume request overwrites a preceding resume or stop request. */
1952 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1953 btinfo->flags |= flag;
1954}
1955
ec71cc2f
MM
1956/* Get the current frame for TP. */
1957
79b8d3b0
TT
1958static struct frame_id
1959get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1960{
79b8d3b0 1961 struct frame_id id;
ec71cc2f
MM
1962 int executing;
1963
00431a78
PA
1964 /* Set current thread, which is implicitly used by
1965 get_current_frame. */
1966 scoped_restore_current_thread restore_thread;
1967
1968 switch_to_thread (tp);
ec71cc2f
MM
1969
1970 /* Clear the executing flag to allow changes to the current frame.
1971 We are not actually running, yet. We just started a reverse execution
1972 command or a record goto command.
1973 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1974 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1975 move the thread. Since we need to recompute the stack, we temporarily
1976 set EXECUTING to flase. */
00431a78
PA
1977 executing = tp->executing;
1978 set_executing (inferior_ptid, false);
ec71cc2f 1979
79b8d3b0 1980 id = null_frame_id;
a70b8144 1981 try
ec71cc2f 1982 {
79b8d3b0 1983 id = get_frame_id (get_current_frame ());
ec71cc2f 1984 }
230d2906 1985 catch (const gdb_exception &except)
ec71cc2f
MM
1986 {
1987 /* Restore the previous execution state. */
1988 set_executing (inferior_ptid, executing);
1989
eedc3f4f 1990 throw;
ec71cc2f 1991 }
ec71cc2f
MM
1992
1993 /* Restore the previous execution state. */
1994 set_executing (inferior_ptid, executing);
1995
79b8d3b0 1996 return id;
ec71cc2f
MM
1997}
1998
52834460
MM
1999/* Start replaying a thread. */
2000
2001static struct btrace_insn_iterator *
2002record_btrace_start_replaying (struct thread_info *tp)
2003{
52834460
MM
2004 struct btrace_insn_iterator *replay;
2005 struct btrace_thread_info *btinfo;
52834460
MM
2006
2007 btinfo = &tp->btrace;
2008 replay = NULL;
2009
2010 /* We can't start replaying without trace. */
b54b03bd 2011 if (btinfo->functions.empty ())
52834460
MM
2012 return NULL;
2013
52834460
MM
2014 /* GDB stores the current frame_id when stepping in order to detects steps
2015 into subroutines.
2016 Since frames are computed differently when we're replaying, we need to
2017 recompute those stored frames and fix them up so we can still detect
2018 subroutines after we started replaying. */
a70b8144 2019 try
52834460 2020 {
52834460
MM
2021 struct frame_id frame_id;
2022 int upd_step_frame_id, upd_step_stack_frame_id;
2023
2024 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2025 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2026
2027 /* Check if we need to update any stepping-related frame id's. */
2028 upd_step_frame_id = frame_id_eq (frame_id,
2029 tp->control.step_frame_id);
2030 upd_step_stack_frame_id = frame_id_eq (frame_id,
2031 tp->control.step_stack_frame_id);
2032
2033 /* We start replaying at the end of the branch trace. This corresponds
2034 to the current instruction. */
8d749320 2035 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2036 btrace_insn_end (replay, btinfo);
2037
31fd9caa
MM
2038 /* Skip gaps at the end of the trace. */
2039 while (btrace_insn_get (replay) == NULL)
2040 {
2041 unsigned int steps;
2042
2043 steps = btrace_insn_prev (replay, 1);
2044 if (steps == 0)
2045 error (_("No trace."));
2046 }
2047
52834460
MM
2048 /* We're not replaying, yet. */
2049 gdb_assert (btinfo->replay == NULL);
2050 btinfo->replay = replay;
2051
2052 /* Make sure we're not using any stale registers. */
00431a78 2053 registers_changed_thread (tp);
52834460
MM
2054
2055 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2056 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2057
2058 /* Replace stepping related frames where necessary. */
2059 if (upd_step_frame_id)
2060 tp->control.step_frame_id = frame_id;
2061 if (upd_step_stack_frame_id)
2062 tp->control.step_stack_frame_id = frame_id;
2063 }
230d2906 2064 catch (const gdb_exception &except)
52834460
MM
2065 {
2066 xfree (btinfo->replay);
2067 btinfo->replay = NULL;
2068
00431a78 2069 registers_changed_thread (tp);
52834460 2070
eedc3f4f 2071 throw;
52834460
MM
2072 }
2073
2074 return replay;
2075}
2076
2077/* Stop replaying a thread. */
2078
2079static void
2080record_btrace_stop_replaying (struct thread_info *tp)
2081{
2082 struct btrace_thread_info *btinfo;
2083
2084 btinfo = &tp->btrace;
2085
2086 xfree (btinfo->replay);
2087 btinfo->replay = NULL;
2088
2089 /* Make sure we're not leaving any stale registers. */
00431a78 2090 registers_changed_thread (tp);
52834460
MM
2091}
2092
e3cfc1c7
MM
2093/* Stop replaying TP if it is at the end of its execution history. */
2094
2095static void
2096record_btrace_stop_replaying_at_end (struct thread_info *tp)
2097{
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2103
2104 if (replay == NULL)
2105 return;
2106
2107 btrace_insn_end (&end, btinfo);
2108
2109 if (btrace_insn_cmp (replay, &end) == 0)
2110 record_btrace_stop_replaying (tp);
2111}
2112
f6ac5f3d 2113/* The resume method of target record-btrace. */
b2f4cfde 2114
f6ac5f3d
PA
2115void
2116record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2117{
d2939ba2 2118 enum btrace_thread_flag flag, cflag;
52834460 2119
a068643d 2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2121 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2122 step ? "step" : "cont");
52834460 2123
0ca912df
MM
2124 /* Store the execution direction of the last resume.
2125
f6ac5f3d 2126 If there is more than one resume call, we have to rely on infrun
0ca912df 2127 to not change the execution direction in-between. */
f6ac5f3d 2128 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2129
0ca912df 2130 /* As long as we're not replaying, just forward the request.
52834460 2131
0ca912df
MM
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
f6ac5f3d
PA
2135 if ((::execution_direction != EXEC_REVERSE)
2136 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2137 {
b6a8c27b 2138 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2139 return;
b2f4cfde
MM
2140 }
2141
52834460 2142 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2143 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2144 {
2145 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2146 cflag = BTHR_RCONT;
2147 }
52834460 2148 else
d2939ba2
MM
2149 {
2150 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2151 cflag = BTHR_CONT;
2152 }
52834460 2153
52834460 2154 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2155 record_btrace_wait below.
2156
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2159 {
26a57c92 2160 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2161
08036331
PA
2162 for (thread_info *tp : all_non_exited_threads (ptid))
2163 {
2164 if (tp->ptid.matches (inferior_ptid))
2165 record_btrace_resume_thread (tp, flag);
2166 else
2167 record_btrace_resume_thread (tp, cflag);
2168 }
d2939ba2
MM
2169 }
2170 else
2171 {
08036331
PA
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 record_btrace_resume_thread (tp, flag);
d2939ba2 2174 }
70ad5bff
MM
2175
2176 /* Async support. */
2177 if (target_can_async_p ())
2178 {
6a3753b3 2179 target_async (1);
70ad5bff
MM
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2181 }
52834460
MM
2182}
2183
f6ac5f3d 2184/* The commit_resume method of target record-btrace. */
85ad3aaf 2185
f6ac5f3d
PA
2186void
2187record_btrace_target::commit_resume ()
85ad3aaf 2188{
f6ac5f3d
PA
2189 if ((::execution_direction != EXEC_REVERSE)
2190 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2191 beneath ()->commit_resume ();
85ad3aaf
PA
2192}
2193
987e68b1
MM
2194/* Cancel resuming TP. */
2195
2196static void
2197record_btrace_cancel_resume (struct thread_info *tp)
2198{
2199 enum btrace_thread_flag flags;
2200
2201 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2202 if (flags == 0)
2203 return;
2204
43792cf0
PA
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp),
a068643d 2207 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2208 btrace_thread_flag_to_str (flags));
2209
2210 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2211 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2212}
2213
2214/* Return a target_waitstatus indicating that we ran out of history. */
2215
2216static struct target_waitstatus
2217btrace_step_no_history (void)
2218{
2219 struct target_waitstatus status;
2220
2221 status.kind = TARGET_WAITKIND_NO_HISTORY;
2222
2223 return status;
2224}
2225
2226/* Return a target_waitstatus indicating that a step finished. */
2227
2228static struct target_waitstatus
2229btrace_step_stopped (void)
2230{
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_STOPPED;
2234 status.value.sig = GDB_SIGNAL_TRAP;
2235
2236 return status;
2237}
2238
6e4879f0
MM
2239/* Return a target_waitstatus indicating that a thread was stopped as
2240 requested. */
2241
2242static struct target_waitstatus
2243btrace_step_stopped_on_request (void)
2244{
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_0;
2249
2250 return status;
2251}
2252
d825d248
MM
2253/* Return a target_waitstatus indicating a spurious stop. */
2254
2255static struct target_waitstatus
2256btrace_step_spurious (void)
2257{
2258 struct target_waitstatus status;
2259
2260 status.kind = TARGET_WAITKIND_SPURIOUS;
2261
2262 return status;
2263}
2264
e3cfc1c7
MM
2265/* Return a target_waitstatus indicating that the thread was not resumed. */
2266
2267static struct target_waitstatus
2268btrace_step_no_resumed (void)
2269{
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_NO_RESUMED;
2273
2274 return status;
2275}
2276
2277/* Return a target_waitstatus indicating that we should wait again. */
2278
2279static struct target_waitstatus
2280btrace_step_again (void)
2281{
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_IGNORE;
2285
2286 return status;
2287}
2288
52834460
MM
2289/* Clear the record histories. */
2290
2291static void
2292record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2293{
2294 xfree (btinfo->insn_history);
2295 xfree (btinfo->call_history);
2296
2297 btinfo->insn_history = NULL;
2298 btinfo->call_history = NULL;
2299}
2300
3c615f99
MM
2301/* Check whether TP's current replay position is at a breakpoint. */
2302
2303static int
2304record_btrace_replay_at_breakpoint (struct thread_info *tp)
2305{
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308 const struct btrace_insn *insn;
3c615f99
MM
2309
2310 btinfo = &tp->btrace;
2311 replay = btinfo->replay;
2312
2313 if (replay == NULL)
2314 return 0;
2315
2316 insn = btrace_insn_get (replay);
2317 if (insn == NULL)
2318 return 0;
2319
00431a78 2320 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2321 &btinfo->stop_reason);
2322}
2323
d825d248 2324/* Step one instruction in forward direction. */
52834460
MM
2325
2326static struct target_waitstatus
d825d248 2327record_btrace_single_step_forward (struct thread_info *tp)
52834460 2328{
b61ce85c 2329 struct btrace_insn_iterator *replay, end, start;
52834460 2330 struct btrace_thread_info *btinfo;
52834460 2331
d825d248
MM
2332 btinfo = &tp->btrace;
2333 replay = btinfo->replay;
2334
2335 /* We're done if we're not replaying. */
2336 if (replay == NULL)
2337 return btrace_step_no_history ();
2338
011c71b6
MM
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2342
b61ce85c
MM
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2345 start = *replay;
d825d248
MM
2346 do
2347 {
2348 unsigned int steps;
2349
e3cfc1c7
MM
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
d825d248
MM
2352 steps = btrace_insn_next (replay, 1);
2353 if (steps == 0)
b61ce85c
MM
2354 {
2355 *replay = start;
2356 return btrace_step_no_history ();
2357 }
d825d248
MM
2358 }
2359 while (btrace_insn_get (replay) == NULL);
2360
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end, btinfo);
2363
e3cfc1c7
MM
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
d825d248 2367 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2368 return btrace_step_no_history ();
d825d248
MM
2369
2370 return btrace_step_spurious ();
2371}
2372
2373/* Step one instruction in backward direction. */
2374
2375static struct target_waitstatus
2376record_btrace_single_step_backward (struct thread_info *tp)
2377{
b61ce85c 2378 struct btrace_insn_iterator *replay, start;
d825d248 2379 struct btrace_thread_info *btinfo;
e59fa00f 2380
52834460
MM
2381 btinfo = &tp->btrace;
2382 replay = btinfo->replay;
2383
d825d248
MM
2384 /* Start replaying if we're not already doing so. */
2385 if (replay == NULL)
2386 replay = record_btrace_start_replaying (tp);
2387
2388 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2391 start = *replay;
d825d248
MM
2392 do
2393 {
2394 unsigned int steps;
2395
2396 steps = btrace_insn_prev (replay, 1);
2397 if (steps == 0)
b61ce85c
MM
2398 {
2399 *replay = start;
2400 return btrace_step_no_history ();
2401 }
d825d248
MM
2402 }
2403 while (btrace_insn_get (replay) == NULL);
2404
011c71b6
MM
2405 /* Check if we're stepping a breakpoint.
2406
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2410
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
2414 if (record_btrace_replay_at_breakpoint (tp))
2415 return btrace_step_stopped ();
2416
d825d248
MM
2417 return btrace_step_spurious ();
2418}
2419
2420/* Step a single thread. */
2421
2422static struct target_waitstatus
2423record_btrace_step_thread (struct thread_info *tp)
2424{
2425 struct btrace_thread_info *btinfo;
2426 struct target_waitstatus status;
2427 enum btrace_thread_flag flags;
2428
2429 btinfo = &tp->btrace;
2430
6e4879f0
MM
2431 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2432 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2433
43792cf0 2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2435 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2436 btrace_thread_flag_to_str (flags));
52834460 2437
6e4879f0
MM
2438 /* We can't step without an execution history. */
2439 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2440 return btrace_step_no_history ();
2441
52834460
MM
2442 switch (flags)
2443 {
2444 default:
2445 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2446
6e4879f0
MM
2447 case BTHR_STOP:
2448 return btrace_step_stopped_on_request ();
2449
52834460 2450 case BTHR_STEP:
d825d248
MM
2451 status = record_btrace_single_step_forward (tp);
2452 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2453 break;
52834460
MM
2454
2455 return btrace_step_stopped ();
2456
2457 case BTHR_RSTEP:
d825d248
MM
2458 status = record_btrace_single_step_backward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2460 break;
52834460
MM
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_CONT:
e3cfc1c7
MM
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
52834460 2468
e3cfc1c7
MM
2469 btinfo->flags |= flags;
2470 return btrace_step_again ();
52834460
MM
2471
2472 case BTHR_RCONT:
e3cfc1c7
MM
2473 status = record_btrace_single_step_backward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2475 break;
52834460 2476
e3cfc1c7
MM
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2479 }
d825d248 2480
f6ac5f3d 2481 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2482 method will stop the thread for whom the event is reported. */
2483 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2484 btinfo->flags |= flags;
52834460 2485
e3cfc1c7 2486 return status;
b2f4cfde
MM
2487}
2488
a6b5be76
MM
2489/* Announce further events if necessary. */
2490
2491static void
53127008
SM
2492record_btrace_maybe_mark_async_event
2493 (const std::vector<thread_info *> &moving,
2494 const std::vector<thread_info *> &no_history)
a6b5be76 2495{
53127008
SM
2496 bool more_moving = !moving.empty ();
2497 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2498
2499 if (!more_moving && !more_no_history)
2500 return;
2501
2502 if (more_moving)
2503 DEBUG ("movers pending");
2504
2505 if (more_no_history)
2506 DEBUG ("no-history pending");
2507
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2509}
2510
f6ac5f3d 2511/* The wait method of target record-btrace. */
b2f4cfde 2512
f6ac5f3d
PA
2513ptid_t
2514record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2515 int options)
b2f4cfde 2516{
53127008
SM
2517 std::vector<thread_info *> moving;
2518 std::vector<thread_info *> no_history;
52834460 2519
a068643d 2520 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2521
b2f4cfde 2522 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2523 if ((::execution_direction != EXEC_REVERSE)
2524 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2525 {
b6a8c27b 2526 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2527 }
2528
e3cfc1c7 2529 /* Keep a work list of moving threads. */
08036331
PA
2530 for (thread_info *tp : all_non_exited_threads (ptid))
2531 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2532 moving.push_back (tp);
e3cfc1c7 2533
53127008 2534 if (moving.empty ())
52834460 2535 {
e3cfc1c7 2536 *status = btrace_step_no_resumed ();
52834460 2537
a068643d 2538 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2539 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2540
e3cfc1c7 2541 return null_ptid;
52834460
MM
2542 }
2543
e3cfc1c7
MM
2544 /* Step moving threads one by one, one step each, until either one thread
2545 reports an event or we run out of threads to step.
2546
2547 When stepping more than one thread, chances are that some threads reach
2548 the end of their execution history earlier than others. If we reported
2549 this immediately, all-stop on top of non-stop would stop all threads and
2550 resume the same threads next time. And we would report the same thread
2551 having reached the end of its execution history again.
2552
2553 In the worst case, this would starve the other threads. But even if other
2554 threads would be allowed to make progress, this would result in far too
2555 many intermediate stops.
2556
2557 We therefore delay the reporting of "no execution history" until we have
2558 nothing else to report. By this time, all threads should have moved to
2559 either the beginning or the end of their execution history. There will
2560 be a single user-visible stop. */
53127008
SM
2561 struct thread_info *eventing = NULL;
2562 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2563 {
53127008 2564 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2565 {
53127008
SM
2566 thread_info *tp = moving[ix];
2567
e3cfc1c7
MM
2568 *status = record_btrace_step_thread (tp);
2569
2570 switch (status->kind)
2571 {
2572 case TARGET_WAITKIND_IGNORE:
2573 ix++;
2574 break;
2575
2576 case TARGET_WAITKIND_NO_HISTORY:
53127008 2577 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2578 break;
2579
2580 default:
53127008 2581 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2582 break;
2583 }
2584 }
2585 }
2586
2587 if (eventing == NULL)
2588 {
2589 /* We started with at least one moving thread. This thread must have
2590 either stopped or reached the end of its execution history.
2591
2592 In the former case, EVENTING must not be NULL.
2593 In the latter case, NO_HISTORY must not be empty. */
53127008 2594 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2595
2596 /* We kept threads moving at the end of their execution history. Stop
2597 EVENTING now that we are going to report its stop. */
53127008 2598 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2599 eventing->btrace.flags &= ~BTHR_MOVE;
2600
2601 *status = btrace_step_no_history ();
2602 }
2603
2604 gdb_assert (eventing != NULL);
2605
2606 /* We kept threads replaying at the end of their execution history. Stop
2607 replaying EVENTING now that we are going to report its stop. */
2608 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2609
2610 /* Stop all other threads. */
5953356c 2611 if (!target_is_non_stop_p ())
53127008 2612 {
08036331 2613 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2614 record_btrace_cancel_resume (tp);
2615 }
52834460 2616
a6b5be76
MM
2617 /* In async mode, we need to announce further events. */
2618 if (target_is_async_p ())
2619 record_btrace_maybe_mark_async_event (moving, no_history);
2620
52834460 2621 /* Start record histories anew from the current position. */
e3cfc1c7 2622 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2623
2624 /* We moved the replay position but did not update registers. */
00431a78 2625 registers_changed_thread (eventing);
e3cfc1c7 2626
43792cf0
PA
2627 DEBUG ("wait ended by thread %s (%s): %s",
2628 print_thread_id (eventing),
a068643d 2629 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2630 target_waitstatus_to_string (status).c_str ());
52834460 2631
e3cfc1c7 2632 return eventing->ptid;
52834460
MM
2633}
2634
f6ac5f3d 2635/* The stop method of target record-btrace. */
6e4879f0 2636
f6ac5f3d
PA
2637void
2638record_btrace_target::stop (ptid_t ptid)
6e4879f0 2639{
a068643d 2640 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2641
2642 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2643 if ((::execution_direction != EXEC_REVERSE)
2644 && !record_is_replaying (minus_one_ptid))
6e4879f0 2645 {
b6a8c27b 2646 this->beneath ()->stop (ptid);
6e4879f0
MM
2647 }
2648 else
2649 {
08036331
PA
2650 for (thread_info *tp : all_non_exited_threads (ptid))
2651 {
2652 tp->btrace.flags &= ~BTHR_MOVE;
2653 tp->btrace.flags |= BTHR_STOP;
2654 }
6e4879f0
MM
2655 }
2656 }
2657
f6ac5f3d 2658/* The can_execute_reverse method of target record-btrace. */
52834460 2659
57810aa7 2660bool
f6ac5f3d 2661record_btrace_target::can_execute_reverse ()
52834460 2662{
57810aa7 2663 return true;
52834460
MM
2664}
2665
f6ac5f3d 2666/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2667
57810aa7 2668bool
f6ac5f3d 2669record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2670{
f6ac5f3d 2671 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2672 {
2673 struct thread_info *tp = inferior_thread ();
2674
2675 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2676 }
2677
b6a8c27b 2678 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2679}
2680
f6ac5f3d 2681/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2682 record-btrace. */
2683
57810aa7 2684bool
f6ac5f3d 2685record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2686{
f6ac5f3d 2687 if (record_is_replaying (minus_one_ptid))
57810aa7 2688 return true;
9e8915c6 2689
b6a8c27b 2690 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2691}
2692
f6ac5f3d 2693/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2694
57810aa7 2695bool
f6ac5f3d 2696record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2697{
f6ac5f3d 2698 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2699 {
2700 struct thread_info *tp = inferior_thread ();
2701
2702 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2703 }
2704
b6a8c27b 2705 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2706}
2707
f6ac5f3d 2708/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2709 record-btrace. */
2710
57810aa7 2711bool
f6ac5f3d 2712record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2713{
f6ac5f3d 2714 if (record_is_replaying (minus_one_ptid))
57810aa7 2715 return true;
52834460 2716
b6a8c27b 2717 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2718}
2719
f6ac5f3d 2720/* The update_thread_list method of target record-btrace. */
e2887aa3 2721
f6ac5f3d
PA
2722void
2723record_btrace_target::update_thread_list ()
e2887aa3 2724{
e8032dde 2725 /* We don't add or remove threads during replay. */
f6ac5f3d 2726 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2727 return;
2728
2729 /* Forward the request. */
b6a8c27b 2730 this->beneath ()->update_thread_list ();
e2887aa3
MM
2731}
2732
f6ac5f3d 2733/* The thread_alive method of target record-btrace. */
e2887aa3 2734
57810aa7 2735bool
f6ac5f3d 2736record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2737{
2738 /* We don't add or remove threads during replay. */
f6ac5f3d 2739 if (record_is_replaying (minus_one_ptid))
00431a78 2740 return true;
e2887aa3
MM
2741
2742 /* Forward the request. */
b6a8c27b 2743 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2744}
2745
066ce621
MM
2746/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749static void
2750record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752{
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
a0f1b963 2757 if (it == NULL)
52834460 2758 record_btrace_stop_replaying (tp);
066ce621
MM
2759 else
2760 {
2761 if (btinfo->replay == NULL)
52834460 2762 record_btrace_start_replaying (tp);
066ce621
MM
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
00431a78 2767 registers_changed_thread (tp);
066ce621
MM
2768 }
2769
52834460
MM
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
485668e5 2772
f2ffa92b
PA
2773 inferior_thread ()->suspend.stop_pc
2774 = regcache_read_pc (get_current_regcache ());
485668e5 2775 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2776}
2777
f6ac5f3d 2778/* The goto_record_begin method of target record-btrace. */
066ce621 2779
f6ac5f3d
PA
2780void
2781record_btrace_target::goto_record_begin ()
066ce621
MM
2782{
2783 struct thread_info *tp;
2784 struct btrace_insn_iterator begin;
2785
2786 tp = require_btrace_thread ();
2787
2788 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2789
2790 /* Skip gaps at the beginning of the trace. */
2791 while (btrace_insn_get (&begin) == NULL)
2792 {
2793 unsigned int steps;
2794
2795 steps = btrace_insn_next (&begin, 1);
2796 if (steps == 0)
2797 error (_("No trace."));
2798 }
2799
066ce621 2800 record_btrace_set_replay (tp, &begin);
066ce621
MM
2801}
2802
f6ac5f3d 2803/* The goto_record_end method of target record-btrace. */
066ce621 2804
f6ac5f3d
PA
2805void
2806record_btrace_target::goto_record_end ()
066ce621
MM
2807{
2808 struct thread_info *tp;
2809
2810 tp = require_btrace_thread ();
2811
2812 record_btrace_set_replay (tp, NULL);
066ce621
MM
2813}
2814
f6ac5f3d 2815/* The goto_record method of target record-btrace. */
066ce621 2816
f6ac5f3d
PA
2817void
2818record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2819{
2820 struct thread_info *tp;
2821 struct btrace_insn_iterator it;
2822 unsigned int number;
2823 int found;
2824
2825 number = insn;
2826
2827 /* Check for wrap-arounds. */
2828 if (number != insn)
2829 error (_("Instruction number out of range."));
2830
2831 tp = require_btrace_thread ();
2832
2833 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2834
2835 /* Check if the instruction could not be found or is a gap. */
2836 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2837 error (_("No such instruction."));
2838
2839 record_btrace_set_replay (tp, &it);
066ce621
MM
2840}
2841
f6ac5f3d 2842/* The record_stop_replaying method of target record-btrace. */
797094dd 2843
f6ac5f3d
PA
2844void
2845record_btrace_target::record_stop_replaying ()
797094dd 2846{
08036331 2847 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2848 record_btrace_stop_replaying (tp);
2849}
2850
f6ac5f3d 2851/* The execution_direction target method. */
70ad5bff 2852
f6ac5f3d
PA
2853enum exec_direction_kind
2854record_btrace_target::execution_direction ()
70ad5bff
MM
2855{
2856 return record_btrace_resume_exec_dir;
2857}
2858
f6ac5f3d 2859/* The prepare_to_generate_core target method. */
aef92902 2860
f6ac5f3d
PA
2861void
2862record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2863{
2864 record_btrace_generating_corefile = 1;
2865}
2866
f6ac5f3d 2867/* The done_generating_core target method. */
aef92902 2868
f6ac5f3d
PA
2869void
2870record_btrace_target::done_generating_core ()
aef92902
MM
2871{
2872 record_btrace_generating_corefile = 0;
2873}
2874
f4abbc16
MM
2875/* Start recording in BTS format. */
2876
2877static void
cdb34d4a 2878cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2879{
f4abbc16
MM
2880 if (args != NULL && *args != 0)
2881 error (_("Invalid argument."));
2882
2883 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2884
a70b8144 2885 try
492d29ea 2886 {
95a6b0a1 2887 execute_command ("target record-btrace", from_tty);
492d29ea 2888 }
230d2906 2889 catch (const gdb_exception &exception)
f4abbc16
MM
2890 {
2891 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2892 throw;
f4abbc16
MM
2893 }
2894}
2895
bc504a31 2896/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2897
2898static void
cdb34d4a 2899cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2900{
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
b20a6524 2904 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2905
a70b8144 2906 try
492d29ea 2907 {
95a6b0a1 2908 execute_command ("target record-btrace", from_tty);
492d29ea 2909 }
230d2906 2910 catch (const gdb_exception &exception)
492d29ea
PA
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2913 throw;
492d29ea 2914 }
afedecd3
MM
2915}
2916
b20a6524
MM
2917/* Alias for "target record". */
2918
2919static void
981a3fb3 2920cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2921{
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_PT;
2926
a70b8144 2927 try
b20a6524 2928 {
95a6b0a1 2929 execute_command ("target record-btrace", from_tty);
b20a6524 2930 }
230d2906 2931 catch (const gdb_exception &exception)
b20a6524
MM
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934
a70b8144 2935 try
b20a6524 2936 {
95a6b0a1 2937 execute_command ("target record-btrace", from_tty);
b20a6524 2938 }
230d2906 2939 catch (const gdb_exception &ex)
b20a6524
MM
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2942 throw;
b20a6524 2943 }
b20a6524 2944 }
b20a6524
MM
2945}
2946
67b5c0c1
MM
2947/* The "set record btrace" command. */
2948
2949static void
981a3fb3 2950cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2951{
b85310e1
MM
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist, "set record btrace ",
2955 all_commands, gdb_stdout);
67b5c0c1
MM
2956}
2957
2958/* The "show record btrace" command. */
2959
2960static void
981a3fb3 2961cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2962{
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2964}
2965
2966/* The "show record btrace replay-memory-access" command. */
2967
2968static void
2969cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971{
2972 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974}
2975
4a4495d6
MM
2976/* The "set record btrace cpu none" command. */
2977
2978static void
2979cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2980{
2981 if (args != nullptr && *args != 0)
2982 error (_("Trailing junk: '%s'."), args);
2983
2984 record_btrace_cpu_state = CS_NONE;
2985}
2986
2987/* The "set record btrace cpu auto" command. */
2988
2989static void
2990cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2991{
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_AUTO;
2996}
2997
2998/* The "set record btrace cpu" command. */
2999
3000static void
3001cmd_set_record_btrace_cpu (const char *args, int from_tty)
3002{
3003 if (args == nullptr)
3004 args = "";
3005
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family, model, stepping;
3008 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3009 &model, &l1, &stepping, &l2);
3010 if (matches == 3)
3011 {
3012 if (strlen (args) != l2)
3013 error (_("Trailing junk: '%s'."), args + l2);
3014 }
3015 else if (matches == 2)
3016 {
3017 if (strlen (args) != l1)
3018 error (_("Trailing junk: '%s'."), args + l1);
3019
3020 stepping = 0;
3021 }
3022 else
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3024
3025 if (USHRT_MAX < family)
3026 error (_("Cpu family too big."));
3027
3028 if (UCHAR_MAX < model)
3029 error (_("Cpu model too big."));
3030
3031 if (UCHAR_MAX < stepping)
3032 error (_("Cpu stepping too big."));
3033
3034 record_btrace_cpu.vendor = CV_INTEL;
3035 record_btrace_cpu.family = family;
3036 record_btrace_cpu.model = model;
3037 record_btrace_cpu.stepping = stepping;
3038
3039 record_btrace_cpu_state = CS_CPU;
3040}
3041
3042/* The "show record btrace cpu" command. */
3043
3044static void
3045cmd_show_record_btrace_cpu (const char *args, int from_tty)
3046{
4a4495d6
MM
3047 if (args != nullptr && *args != 0)
3048 error (_("Trailing junk: '%s'."), args);
3049
3050 switch (record_btrace_cpu_state)
3051 {
3052 case CS_AUTO:
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3054 return;
3055
3056 case CS_NONE:
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3058 return;
3059
3060 case CS_CPU:
3061 switch (record_btrace_cpu.vendor)
3062 {
3063 case CV_INTEL:
3064 if (record_btrace_cpu.stepping == 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu.family,
3067 record_btrace_cpu.model);
3068 else
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model,
3072 record_btrace_cpu.stepping);
3073 return;
3074 }
3075 }
3076
3077 error (_("Internal error: bad cpu state."));
3078}
3079
3080/* The "s record btrace bts" command. */
d33501a5
MM
3081
3082static void
981a3fb3 3083cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3084{
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3086 "by an appropriate subcommand.\n"));
d33501a5
MM
3087 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3088 all_commands, gdb_stdout);
3089}
3090
3091/* The "show record btrace bts" command. */
3092
3093static void
981a3fb3 3094cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3095{
3096 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3097}
3098
b20a6524
MM
3099/* The "set record btrace pt" command. */
3100
3101static void
981a3fb3 3102cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3103{
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3107 all_commands, gdb_stdout);
3108}
3109
3110/* The "show record btrace pt" command. */
3111
3112static void
981a3fb3 3113cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3114{
3115 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3116}
3117
3118/* The "record bts buffer-size" show value function. */
3119
3120static void
3121show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3122 struct cmd_list_element *c,
3123 const char *value)
3124{
3125 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3126 value);
3127}
3128
3129/* The "record pt buffer-size" show value function. */
3130
3131static void
3132show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135{
3136 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3137 value);
3138}
3139
afedecd3
MM
3140/* Initialize btrace commands. */
3141
3142void
3143_initialize_record_btrace (void)
3144{
f4abbc16
MM
3145 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3146 _("Start branch trace recording."), &record_btrace_cmdlist,
3147 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3148 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3149
f4abbc16
MM
3150 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3151 _("\
3152Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3153The processor stores a from/to record for each branch into a cyclic buffer.\n\
3154This format may not be available on all processors."),
3155 &record_btrace_cmdlist);
3156 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3157
b20a6524
MM
3158 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3159 _("\
bc504a31 3160Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3161This format may not be available on all processors."),
3162 &record_btrace_cmdlist);
3163 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3164
67b5c0c1 3165 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
590042fc 3166 _("Set record options."), &set_record_btrace_cmdlist,
67b5c0c1
MM
3167 "set record btrace ", 0, &set_record_cmdlist);
3168
3169 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
590042fc 3170 _("Show record options."), &show_record_btrace_cmdlist,
67b5c0c1
MM
3171 "show record btrace ", 0, &show_record_cmdlist);
3172
3173 add_setshow_enum_cmd ("replay-memory-access", no_class,
3174 replay_memory_access_types, &replay_memory_access, _("\
3175Set what memory accesses are allowed during replay."), _("\
3176Show what memory accesses are allowed during replay."),
3177 _("Default is READ-ONLY.\n\n\
3178The btrace record target does not trace data.\n\
3179The memory therefore corresponds to the live target and not \
3180to the current replay position.\n\n\
3181When READ-ONLY, allow accesses to read-only memory during replay.\n\
3182When READ-WRITE, allow accesses to read-only and read-write memory during \
3183replay."),
3184 NULL, cmd_show_replay_memory_access,
3185 &set_record_btrace_cmdlist,
3186 &show_record_btrace_cmdlist);
3187
4a4495d6
MM
3188 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3189 _("\
3190Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3191The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3193When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195When GDB does not support that cpu, this option can be used to enable\n\
3196workarounds for a similar cpu that GDB supports.\n\n\
3197When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist,
590042fc 3199 "set record btrace cpu ", 1,
4a4495d6
MM
3200 &set_record_btrace_cmdlist);
3201
3202 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3203Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist);
3205
3206 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3207Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist);
3209
3210 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3211Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist);
3213
d33501a5 3214 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
590042fc 3215 _("Set record btrace bts options."),
d33501a5
MM
3216 &set_record_btrace_bts_cmdlist,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3218
3219 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
590042fc 3220 _("Show record btrace bts options."),
d33501a5
MM
3221 &show_record_btrace_bts_cmdlist,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3223
3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
3225 &record_btrace_conf.bts.size,
3226 _("Set the record/replay bts buffer size."),
3227 _("Show the record/replay bts buffer size."), _("\
3228When starting recording request a trace buffer of this size. \
3229The actual buffer size may differ from the requested size. \
3230Use \"info record\" to see the actual buffer size.\n\n\
3231Bigger buffers allow longer recording but also take more time to process \
3232the recorded execution trace.\n\n\
b20a6524
MM
3233The trace buffer size may not be changed while recording."), NULL,
3234 show_record_bts_buffer_size_value,
d33501a5
MM
3235 &set_record_btrace_bts_cmdlist,
3236 &show_record_btrace_bts_cmdlist);
3237
b20a6524 3238 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
590042fc 3239 _("Set record btrace pt options."),
b20a6524
MM
3240 &set_record_btrace_pt_cmdlist,
3241 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3242
3243 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
590042fc 3244 _("Show record btrace pt options."),
b20a6524
MM
3245 &show_record_btrace_pt_cmdlist,
3246 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3247
3248 add_setshow_uinteger_cmd ("buffer-size", no_class,
3249 &record_btrace_conf.pt.size,
3250 _("Set the record/replay pt buffer size."),
3251 _("Show the record/replay pt buffer size."), _("\
3252Bigger buffers allow longer recording but also take more time to process \
3253the recorded execution.\n\
3254The actual buffer size may differ from the requested size. Use \"info record\" \
3255to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3256 &set_record_btrace_pt_cmdlist,
3257 &show_record_btrace_pt_cmdlist);
3258
d9f719f1 3259 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3260
3261 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3262 xcalloc, xfree);
d33501a5
MM
3263
3264 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3265 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3266}
This page took 0.936085 seconds and 4 git commands to generate.