gdb/record-btrace.c — branch trace support for GDB (from the binutils-gdb.git repository)
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
3666a048 3 Copyright (C) 2013-2021 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
400b5eca 39#include "gdbsupport/event-loop.h"
70ad5bff 40#include "inf-loop.h"
00431a78 41#include "inferior.h"
325fac50 42#include <algorithm>
0d12e84c 43#include "gdbarch.h"
e43b10e1 44#include "cli/cli-style.h"
93b54c8e 45#include "async-event.h"
159ed7d9 46#include <forward_list>
afedecd3 47
d9f719f1
PA
48static const target_info record_btrace_target_info = {
49 "record-btrace",
50 N_("Branch tracing target"),
51 N_("Collect control-flow trace and provide the execution history.")
52};
53
afedecd3 54/* The target_ops of record-btrace. */
f6ac5f3d
PA
55
56class record_btrace_target final : public target_ops
57{
58public:
d9f719f1
PA
59 const target_info &info () const override
60 { return record_btrace_target_info; }
f6ac5f3d 61
66b4deae
PA
62 strata stratum () const override { return record_stratum; }
63
f6ac5f3d
PA
64 void close () override;
65 void async (int) override;
66
67 void detach (inferior *inf, int from_tty) override
68 { record_detach (this, inf, from_tty); }
69
70 void disconnect (const char *, int) override;
71
72 void mourn_inferior () override
73 { record_mourn_inferior (this); }
74
75 void kill () override
76 { record_kill (this); }
77
78 enum record_method record_method (ptid_t ptid) override;
79
80 void stop_recording () override;
81 void info_record () override;
82
83 void insn_history (int size, gdb_disassembly_flags flags) override;
84 void insn_history_from (ULONGEST from, int size,
85 gdb_disassembly_flags flags) override;
86 void insn_history_range (ULONGEST begin, ULONGEST end,
87 gdb_disassembly_flags flags) override;
88 void call_history (int size, record_print_flags flags) override;
89 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
90 override;
91 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
92 override;
93
57810aa7
PA
94 bool record_is_replaying (ptid_t ptid) override;
95 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
96 void record_stop_replaying () override;
97
98 enum target_xfer_status xfer_partial (enum target_object object,
99 const char *annex,
100 gdb_byte *readbuf,
101 const gdb_byte *writebuf,
102 ULONGEST offset, ULONGEST len,
103 ULONGEST *xfered_len) override;
104
105 int insert_breakpoint (struct gdbarch *,
106 struct bp_target_info *) override;
107 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
108 enum remove_bp_reason) override;
109
110 void fetch_registers (struct regcache *, int) override;
111
112 void store_registers (struct regcache *, int) override;
113 void prepare_to_store (struct regcache *) override;
114
115 const struct frame_unwind *get_unwinder () override;
116
117 const struct frame_unwind *get_tailcall_unwinder () override;
118
f6ac5f3d 119 void resume (ptid_t, int, enum gdb_signal) override;
b60cea74 120 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
f6ac5f3d
PA
121
122 void stop (ptid_t) override;
123 void update_thread_list () override;
57810aa7 124 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
125 void goto_record_begin () override;
126 void goto_record_end () override;
127 void goto_record (ULONGEST insn) override;
128
57810aa7 129 bool can_execute_reverse () override;
f6ac5f3d 130
57810aa7
PA
131 bool stopped_by_sw_breakpoint () override;
132 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 133
57810aa7
PA
134 bool stopped_by_hw_breakpoint () override;
135 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
136
137 enum exec_direction_kind execution_direction () override;
138 void prepare_to_generate_core () override;
139 void done_generating_core () override;
140};
141
142static record_btrace_target record_btrace_ops;
143
144/* Initialize the record-btrace target ops. */
afedecd3 145
76727919
TT
146/* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
3dcfdc58 148static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 149
67b5c0c1
MM
150/* Memory access types used in set/show record btrace replay-memory-access. */
151static const char replay_memory_access_read_only[] = "read-only";
152static const char replay_memory_access_read_write[] = "read-write";
153static const char *const replay_memory_access_types[] =
154{
155 replay_memory_access_read_only,
156 replay_memory_access_read_write,
157 NULL
158};
159
160/* The currently allowed replay memory access type. */
161static const char *replay_memory_access = replay_memory_access_read_only;
162
4a4495d6
MM
163/* The cpu state kinds. */
164enum record_btrace_cpu_state_kind
165{
166 CS_AUTO,
167 CS_NONE,
168 CS_CPU
169};
170
171/* The current cpu state. */
172static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
173
174/* The current cpu for trace decode. */
175static struct btrace_cpu record_btrace_cpu;
176
67b5c0c1
MM
177/* Command lists for "set/show record btrace". */
178static struct cmd_list_element *set_record_btrace_cmdlist;
179static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 180
70ad5bff
MM
181/* The execution direction of the last resume we got. See record-full.c. */
182static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
183
184/* The async event handler for reverse/replay execution. */
185static struct async_event_handler *record_btrace_async_inferior_event_handler;
186
aef92902
MM
187/* A flag indicating that we are currently generating a core file. */
188static int record_btrace_generating_corefile;
189
f4abbc16
MM
190/* The current branch trace configuration. */
191static struct btrace_config record_btrace_conf;
192
193/* Command list for "record btrace". */
194static struct cmd_list_element *record_btrace_cmdlist;
195
d33501a5
MM
196/* Command lists for "set/show record btrace bts". */
197static struct cmd_list_element *set_record_btrace_bts_cmdlist;
198static struct cmd_list_element *show_record_btrace_bts_cmdlist;
199
b20a6524
MM
200/* Command lists for "set/show record btrace pt". */
201static struct cmd_list_element *set_record_btrace_pt_cmdlist;
202static struct cmd_list_element *show_record_btrace_pt_cmdlist;
203
4a4495d6
MM
204/* Command list for "set record btrace cpu". */
205static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206
afedecd3
MM
207/* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
209
210#define DEBUG(msg, args...) \
211 do \
212 { \
213 if (record_debug != 0) \
dda83cd7 214 fprintf_unfiltered (gdb_stdlog, \
afedecd3
MM
215 "[record-btrace] " msg "\n", ##args); \
216 } \
217 while (0)
218
219
4a4495d6
MM
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
223record_btrace_get_cpu (void)
224{
225 switch (record_btrace_cpu_state)
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
afedecd3 240/* Update the branch trace for the current thread and return a pointer to its
066ce621 241 thread_info.
afedecd3
MM
242
243 Throws an error if there is no thread or no trace. This function never
244 returns NULL. */
245
066ce621
MM
246static struct thread_info *
247require_btrace_thread (void)
afedecd3 248{
afedecd3
MM
249 DEBUG ("require");
250
00431a78 251 if (inferior_ptid == null_ptid)
afedecd3
MM
252 error (_("No thread."));
253
00431a78
PA
254 thread_info *tp = inferior_thread ();
255
cd4007e4
MM
256 validate_registers_access ();
257
4a4495d6 258 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 259
6e07b1d2 260 if (btrace_is_empty (tp))
afedecd3
MM
261 error (_("No trace."));
262
066ce621
MM
263 return tp;
264}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
282/* Enable branch tracing for one thread. Warn on errors. */
283
284static void
285record_btrace_enable_warn (struct thread_info *tp)
286{
d89edf9b
MM
287 /* Ignore this thread if its inferior is not recorded by us. */
288 target_ops *rec = tp->inf->target_at (record_stratum);
289 if (rec != &record_btrace_ops)
290 return;
291
a70b8144 292 try
492d29ea
PA
293 {
294 btrace_enable (tp, &record_btrace_conf);
295 }
230d2906 296 catch (const gdb_exception_error &error)
492d29ea 297 {
3d6e9d23 298 warning ("%s", error.what ());
492d29ea 299 }
afedecd3
MM
300}
301
afedecd3
MM
302/* Enable automatic tracing of new threads. */
303
304static void
305record_btrace_auto_enable (void)
306{
307 DEBUG ("attach thread observer");
308
76727919
TT
309 gdb::observers::new_thread.attach (record_btrace_enable_warn,
310 record_btrace_thread_observer_token);
afedecd3
MM
311}
312
313/* Disable automatic tracing of new threads. */
314
315static void
316record_btrace_auto_disable (void)
317{
afedecd3
MM
318 DEBUG ("detach thread observer");
319
76727919 320 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
321}
322
70ad5bff
MM
323/* The record-btrace async event handler function. */
324
325static void
326record_btrace_handle_async_inferior_event (gdb_client_data data)
327{
b1a35af2 328 inferior_event_handler (INF_REG_EVENT);
70ad5bff
MM
329}
330
c0272db5
TW
331/* See record-btrace.h. */
332
333void
334record_btrace_push_target (void)
335{
336 const char *format;
337
338 record_btrace_auto_enable ();
339
340 push_target (&record_btrace_ops);
341
342 record_btrace_async_inferior_event_handler
343 = create_async_event_handler (record_btrace_handle_async_inferior_event,
db20ebdf 344 NULL, "record-btrace");
c0272db5
TW
345 record_btrace_generating_corefile = 0;
346
347 format = btrace_format_short_string (record_btrace_conf.format);
76727919 348 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
349}
350
228f1508
SM
351/* Disable btrace on a set of threads on scope exit. */
352
353struct scoped_btrace_disable
354{
355 scoped_btrace_disable () = default;
356
357 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
358
359 ~scoped_btrace_disable ()
360 {
361 for (thread_info *tp : m_threads)
362 btrace_disable (tp);
363 }
364
365 void add_thread (thread_info *thread)
366 {
367 m_threads.push_front (thread);
368 }
369
370 void discard ()
371 {
372 m_threads.clear ();
373 }
374
375private:
376 std::forward_list<thread_info *> m_threads;
377};
378
d9f719f1 379/* Open target record-btrace. */
afedecd3 380
d9f719f1
PA
381static void
382record_btrace_target_open (const char *args, int from_tty)
afedecd3 383{
228f1508
SM
384 /* If we fail to enable btrace for one thread, disable it for the threads for
385 which it was successfully enabled. */
386 scoped_btrace_disable btrace_disable;
afedecd3
MM
387
388 DEBUG ("open");
389
8213266a 390 record_preopen ();
afedecd3 391
55f6301a 392 if (!target_has_execution ())
afedecd3
MM
393 error (_("The program is not being run."));
394
d89edf9b 395 for (thread_info *tp : current_inferior ()->non_exited_threads ())
5d5658a1 396 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 397 {
f4abbc16 398 btrace_enable (tp, &record_btrace_conf);
afedecd3 399
228f1508 400 btrace_disable.add_thread (tp);
afedecd3
MM
401 }
402
c0272db5 403 record_btrace_push_target ();
afedecd3 404
228f1508 405 btrace_disable.discard ();
afedecd3
MM
406}
407
f6ac5f3d 408/* The stop_recording method of target record-btrace. */
afedecd3 409
f6ac5f3d
PA
410void
411record_btrace_target::stop_recording ()
afedecd3 412{
afedecd3
MM
413 DEBUG ("stop recording");
414
415 record_btrace_auto_disable ();
416
d89edf9b 417 for (thread_info *tp : current_inferior ()->non_exited_threads ())
afedecd3
MM
418 if (tp->btrace.target != NULL)
419 btrace_disable (tp);
420}
421
f6ac5f3d 422/* The disconnect method of target record-btrace. */
c0272db5 423
f6ac5f3d
PA
424void
425record_btrace_target::disconnect (const char *args,
426 int from_tty)
c0272db5 427{
b6a8c27b 428 struct target_ops *beneath = this->beneath ();
c0272db5
TW
429
430 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 431 unpush_target (this);
c0272db5
TW
432
433 /* Forward disconnect. */
f6ac5f3d 434 beneath->disconnect (args, from_tty);
c0272db5
TW
435}
436
f6ac5f3d 437/* The close method of target record-btrace. */
afedecd3 438
f6ac5f3d
PA
439void
440record_btrace_target::close ()
afedecd3 441{
70ad5bff
MM
442 if (record_btrace_async_inferior_event_handler != NULL)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
444
99c819ee
MM
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
448
568e808b
MM
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
d89edf9b 451 for (thread_info *tp : current_inferior ()->non_exited_threads ())
568e808b 452 btrace_teardown (tp);
afedecd3
MM
453}
454
f6ac5f3d 455/* The async method of target record-btrace. */
b7d2e916 456
f6ac5f3d
PA
457void
458record_btrace_target::async (int enable)
b7d2e916 459{
6a3753b3 460 if (enable)
b7d2e916
PA
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
462 else
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
464
b6a8c27b 465 this->beneath ()->async (enable);
b7d2e916
PA
466}
467
d33501a5
MM
468/* Adjusts the size and returns a human readable size suffix. */
469
470static const char *
471record_btrace_adjust_size (unsigned int *size)
472{
473 unsigned int sz;
474
475 sz = *size;
476
477 if ((sz & ((1u << 30) - 1)) == 0)
478 {
479 *size = sz >> 30;
480 return "GB";
481 }
482 else if ((sz & ((1u << 20) - 1)) == 0)
483 {
484 *size = sz >> 20;
485 return "MB";
486 }
487 else if ((sz & ((1u << 10) - 1)) == 0)
488 {
489 *size = sz >> 10;
490 return "kB";
491 }
492 else
493 return "";
494}
495
496/* Print a BTS configuration. */
497
498static void
499record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
500{
501 const char *suffix;
502 unsigned int size;
503
504 size = conf->size;
505 if (size > 0)
506 {
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
509 }
510}
511
bc504a31 512/* Print an Intel Processor Trace configuration. */
b20a6524
MM
513
514static void
515record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
516{
517 const char *suffix;
518 unsigned int size;
519
520 size = conf->size;
521 if (size > 0)
522 {
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
525 }
526}
527
d33501a5
MM
528/* Print a branch tracing configuration. */
529
530static void
531record_btrace_print_conf (const struct btrace_config *conf)
532{
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
535
536 switch (conf->format)
537 {
538 case BTRACE_FORMAT_NONE:
539 return;
540
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
543 return;
b20a6524
MM
544
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
547 return;
d33501a5
MM
548 }
549
40c94099 550 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
d33501a5
MM
551}
552
f6ac5f3d 553/* The info_record method of target record-btrace. */
afedecd3 554
f6ac5f3d
PA
555void
556record_btrace_target::info_record ()
afedecd3
MM
557{
558 struct btrace_thread_info *btinfo;
f4abbc16 559 const struct btrace_config *conf;
afedecd3 560 struct thread_info *tp;
31fd9caa 561 unsigned int insns, calls, gaps;
afedecd3
MM
562
563 DEBUG ("info");
564
5b6d1e4f 565 if (inferior_ptid == null_ptid)
afedecd3
MM
566 error (_("No thread."));
567
5b6d1e4f
PA
568 tp = inferior_thread ();
569
cd4007e4
MM
570 validate_registers_access ();
571
f4abbc16
MM
572 btinfo = &tp->btrace;
573
f6ac5f3d 574 conf = ::btrace_conf (btinfo);
f4abbc16 575 if (conf != NULL)
d33501a5 576 record_btrace_print_conf (conf);
f4abbc16 577
4a4495d6 578 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 579
23a7fe75
MM
580 insns = 0;
581 calls = 0;
31fd9caa 582 gaps = 0;
23a7fe75 583
6e07b1d2 584 if (!btrace_is_empty (tp))
23a7fe75
MM
585 {
586 struct btrace_call_iterator call;
587 struct btrace_insn_iterator insn;
588
589 btrace_call_end (&call, btinfo);
590 btrace_call_prev (&call, 1);
5de9129b 591 calls = btrace_call_number (&call);
23a7fe75
MM
592
593 btrace_insn_end (&insn, btinfo);
5de9129b 594 insns = btrace_insn_number (&insn);
31fd9caa 595
69090cee
TW
596 /* If the last instruction is not a gap, it is the current instruction
597 that is not actually part of the record. */
598 if (btrace_insn_get (&insn) != NULL)
599 insns -= 1;
31fd9caa
MM
600
601 gaps = btinfo->ngaps;
23a7fe75 602 }
afedecd3 603
31fd9caa 604 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0 605 "for thread %s (%s).\n"), insns, calls, gaps,
a068643d
TT
606 print_thread_id (tp),
607 target_pid_to_str (tp->ptid).c_str ());
07bbe694
MM
608
609 if (btrace_is_replaying (tp))
610 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
611 btrace_insn_number (btinfo->replay));
afedecd3
MM
612}
613
31fd9caa
MM
614/* Print a decode error. */
615
616static void
617btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
618 enum btrace_format format)
619{
508352a9 620 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 621
112e8700 622 uiout->text (_("["));
508352a9
TW
623 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
624 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 625 {
112e8700 626 uiout->text (_("decode error ("));
381befee 627 uiout->field_signed ("errcode", errcode);
112e8700 628 uiout->text (_("): "));
31fd9caa 629 }
112e8700
SM
630 uiout->text (errstr);
631 uiout->text (_("]\n"));
31fd9caa
MM
632}
633
f94cc897
MM
634/* A range of source lines. */
635
636struct btrace_line_range
637{
638 /* The symtab this line is from. */
639 struct symtab *symtab;
640
641 /* The first line (inclusive). */
642 int begin;
643
644 /* The last line (exclusive). */
645 int end;
646};
647
648/* Construct a line range. */
649
650static struct btrace_line_range
651btrace_mk_line_range (struct symtab *symtab, int begin, int end)
652{
653 struct btrace_line_range range;
654
655 range.symtab = symtab;
656 range.begin = begin;
657 range.end = end;
658
659 return range;
660}
661
662/* Add a line to a line range. */
663
664static struct btrace_line_range
665btrace_line_range_add (struct btrace_line_range range, int line)
666{
667 if (range.end <= range.begin)
668 {
669 /* This is the first entry. */
670 range.begin = line;
671 range.end = line + 1;
672 }
673 else if (line < range.begin)
674 range.begin = line;
675 else if (range.end < line)
676 range.end = line;
677
678 return range;
679}
680
681/* Return non-zero if RANGE is empty, zero otherwise. */
682
683static int
684btrace_line_range_is_empty (struct btrace_line_range range)
685{
686 return range.end <= range.begin;
687}
688
689/* Return non-zero if LHS contains RHS, zero otherwise. */
690
691static int
692btrace_line_range_contains_range (struct btrace_line_range lhs,
693 struct btrace_line_range rhs)
694{
695 return ((lhs.symtab == rhs.symtab)
696 && (lhs.begin <= rhs.begin)
697 && (rhs.end <= lhs.end));
698}
699
700/* Find the line range associated with PC. */
701
702static struct btrace_line_range
703btrace_find_line_range (CORE_ADDR pc)
704{
705 struct btrace_line_range range;
706 struct linetable_entry *lines;
707 struct linetable *ltable;
708 struct symtab *symtab;
709 int nlines, i;
710
711 symtab = find_pc_line_symtab (pc);
712 if (symtab == NULL)
713 return btrace_mk_line_range (NULL, 0, 0);
714
715 ltable = SYMTAB_LINETABLE (symtab);
716 if (ltable == NULL)
717 return btrace_mk_line_range (symtab, 0, 0);
718
719 nlines = ltable->nitems;
720 lines = ltable->item;
721 if (nlines <= 0)
722 return btrace_mk_line_range (symtab, 0, 0);
723
724 range = btrace_mk_line_range (symtab, 0, 0);
725 for (i = 0; i < nlines - 1; i++)
726 {
8c95582d
AB
727 /* The test of is_stmt here was added when the is_stmt field was
728 introduced to the 'struct linetable_entry' structure. This
729 ensured that this loop maintained the same behaviour as before we
730 introduced is_stmt. That said, it might be that we would be
731 better off not checking is_stmt here, this would lead to us
732 possibly adding more line numbers to the range. At the time this
733 change was made I was unsure how to test this so chose to go with
734 maintaining the existing experience. */
735 if ((lines[i].pc == pc) && (lines[i].line != 0)
736 && (lines[i].is_stmt == 1))
f94cc897
MM
737 range = btrace_line_range_add (range, lines[i].line);
738 }
739
740 return range;
741}
742
743/* Print source lines in LINES to UIOUT.
744
745 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
746 instructions corresponding to that source line. When printing a new source
747 line, we do the cleanups for the open chain and open a new cleanup chain for
748 the new source line. If the source line range in LINES is not empty, this
749 function will leave the cleanup chain for the last printed source line open
750 so instructions can be added to it. */
751
752static void
753btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
754 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
755 gdb::optional<ui_out_emit_list> *asm_list,
756 gdb_disassembly_flags flags)
f94cc897 757{
8d297bbf 758 print_source_lines_flags psl_flags;
f94cc897 759
f94cc897
MM
760 if (flags & DISASSEMBLY_FILENAME)
761 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
762
7ea78b59 763 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 764 {
7ea78b59 765 asm_list->reset ();
f94cc897 766
7ea78b59 767 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
768
769 print_source_lines (lines.symtab, line, line + 1, psl_flags);
770
7ea78b59 771 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
772 }
773}
774
afedecd3
MM
775/* Disassemble a section of the recorded instruction trace. */
776
777static void
23a7fe75 778btrace_insn_history (struct ui_out *uiout,
31fd9caa 779 const struct btrace_thread_info *btinfo,
23a7fe75 780 const struct btrace_insn_iterator *begin,
9a24775b
PA
781 const struct btrace_insn_iterator *end,
782 gdb_disassembly_flags flags)
afedecd3 783{
9a24775b
PA
784 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
785 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 786
f94cc897
MM
787 flags |= DISASSEMBLY_SPECULATIVE;
788
7ea78b59
SM
789 struct gdbarch *gdbarch = target_gdbarch ();
790 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 791
7ea78b59 792 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 793
7ea78b59
SM
794 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
795 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 796
046bebe1 797 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
8b172ce7 798
7ea78b59 799 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
dda83cd7 800 btrace_insn_next (&it, 1))
afedecd3 801 {
23a7fe75
MM
802 const struct btrace_insn *insn;
803
804 insn = btrace_insn_get (&it);
805
31fd9caa
MM
806 /* A NULL instruction indicates a gap in the trace. */
807 if (insn == NULL)
808 {
809 const struct btrace_config *conf;
810
811 conf = btrace_conf (btinfo);
afedecd3 812
31fd9caa
MM
813 /* We have trace so we must have a configuration. */
814 gdb_assert (conf != NULL);
815
69090cee
TW
816 uiout->field_fmt ("insn-number", "%u",
817 btrace_insn_number (&it));
818 uiout->text ("\t");
819
820 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
821 conf->format);
822 }
823 else
824 {
f94cc897 825 struct disasm_insn dinsn;
da8c46d2 826
f94cc897 827 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 828 {
f94cc897
MM
829 struct btrace_line_range lines;
830
831 lines = btrace_find_line_range (insn->pc);
832 if (!btrace_line_range_is_empty (lines)
833 && !btrace_line_range_contains_range (last_lines, lines))
834 {
7ea78b59
SM
835 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
836 flags);
f94cc897
MM
837 last_lines = lines;
838 }
7ea78b59 839 else if (!src_and_asm_tuple.has_value ())
f94cc897 840 {
7ea78b59
SM
841 gdb_assert (!asm_list.has_value ());
842
843 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
844
f94cc897 845 /* No source information. */
7ea78b59 846 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
847 }
848
7ea78b59
SM
849 gdb_assert (src_and_asm_tuple.has_value ());
850 gdb_assert (asm_list.has_value ());
da8c46d2 851 }
da8c46d2 852
f94cc897
MM
853 memset (&dinsn, 0, sizeof (dinsn));
854 dinsn.number = btrace_insn_number (&it);
855 dinsn.addr = insn->pc;
31fd9caa 856
da8c46d2 857 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 858 dinsn.is_speculative = 1;
da8c46d2 859
046bebe1 860 disasm.pretty_print_insn (&dinsn, flags);
31fd9caa 861 }
afedecd3
MM
862 }
863}
864
f6ac5f3d 865/* The insn_history method of target record-btrace. */
afedecd3 866
f6ac5f3d
PA
867void
868record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
869{
870 struct btrace_thread_info *btinfo;
23a7fe75
MM
871 struct btrace_insn_history *history;
872 struct btrace_insn_iterator begin, end;
afedecd3 873 struct ui_out *uiout;
23a7fe75 874 unsigned int context, covered;
afedecd3
MM
875
876 uiout = current_uiout;
2e783024 877 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 878 context = abs (size);
afedecd3
MM
879 if (context == 0)
880 error (_("Bad record instruction-history-size."));
881
23a7fe75
MM
882 btinfo = require_btrace ();
883 history = btinfo->insn_history;
884 if (history == NULL)
afedecd3 885 {
07bbe694 886 struct btrace_insn_iterator *replay;
afedecd3 887
9a24775b 888 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 889
07bbe694
MM
890 /* If we're replaying, we start at the replay position. Otherwise, we
891 start at the tail of the trace. */
892 replay = btinfo->replay;
893 if (replay != NULL)
894 begin = *replay;
895 else
896 btrace_insn_end (&begin, btinfo);
897
898 /* We start from here and expand in the requested direction. Then we
899 expand in the other direction, as well, to fill up any remaining
900 context. */
901 end = begin;
902 if (size < 0)
903 {
904 /* We want the current position covered, as well. */
905 covered = btrace_insn_next (&end, 1);
906 covered += btrace_insn_prev (&begin, context - covered);
907 covered += btrace_insn_next (&end, context - covered);
908 }
909 else
910 {
911 covered = btrace_insn_next (&end, context);
912 covered += btrace_insn_prev (&begin, context - covered);
913 }
afedecd3
MM
914 }
915 else
916 {
23a7fe75
MM
917 begin = history->begin;
918 end = history->end;
afedecd3 919
9a24775b 920 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 921 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 922
23a7fe75
MM
923 if (size < 0)
924 {
925 end = begin;
926 covered = btrace_insn_prev (&begin, context);
927 }
928 else
929 {
930 begin = end;
931 covered = btrace_insn_next (&end, context);
932 }
afedecd3
MM
933 }
934
23a7fe75 935 if (covered > 0)
31fd9caa 936 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
937 else
938 {
939 if (size < 0)
940 printf_unfiltered (_("At the start of the branch trace record.\n"));
941 else
942 printf_unfiltered (_("At the end of the branch trace record.\n"));
943 }
afedecd3 944
23a7fe75 945 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
946}
947
f6ac5f3d 948/* The insn_history_range method of target record-btrace. */
afedecd3 949
f6ac5f3d
PA
950void
951record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
952 gdb_disassembly_flags flags)
afedecd3
MM
953{
954 struct btrace_thread_info *btinfo;
23a7fe75 955 struct btrace_insn_iterator begin, end;
afedecd3 956 struct ui_out *uiout;
23a7fe75
MM
957 unsigned int low, high;
958 int found;
afedecd3
MM
959
960 uiout = current_uiout;
2e783024 961 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
962 low = from;
963 high = to;
afedecd3 964
9a24775b 965 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
966
967 /* Check for wrap-arounds. */
23a7fe75 968 if (low != from || high != to)
afedecd3
MM
969 error (_("Bad range."));
970
0688d04e 971 if (high < low)
afedecd3
MM
972 error (_("Bad range."));
973
23a7fe75 974 btinfo = require_btrace ();
afedecd3 975
23a7fe75
MM
976 found = btrace_find_insn_by_number (&begin, btinfo, low);
977 if (found == 0)
978 error (_("Range out of bounds."));
afedecd3 979
23a7fe75
MM
980 found = btrace_find_insn_by_number (&end, btinfo, high);
981 if (found == 0)
0688d04e
MM
982 {
983 /* Silently truncate the range. */
984 btrace_insn_end (&end, btinfo);
985 }
986 else
987 {
988 /* We want both begin and end to be inclusive. */
989 btrace_insn_next (&end, 1);
990 }
afedecd3 991
31fd9caa 992 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 993 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
994}
995
f6ac5f3d 996/* The insn_history_from method of target record-btrace. */
afedecd3 997
f6ac5f3d
PA
998void
999record_btrace_target::insn_history_from (ULONGEST from, int size,
1000 gdb_disassembly_flags flags)
afedecd3
MM
1001{
1002 ULONGEST begin, end, context;
1003
1004 context = abs (size);
0688d04e
MM
1005 if (context == 0)
1006 error (_("Bad record instruction-history-size."));
afedecd3
MM
1007
1008 if (size < 0)
1009 {
1010 end = from;
1011
1012 if (from < context)
1013 begin = 0;
1014 else
0688d04e 1015 begin = from - context + 1;
afedecd3
MM
1016 }
1017 else
1018 {
1019 begin = from;
0688d04e 1020 end = from + context - 1;
afedecd3
MM
1021
1022 /* Check for wrap-around. */
1023 if (end < begin)
1024 end = ULONGEST_MAX;
1025 }
1026
f6ac5f3d 1027 insn_history_range (begin, end, flags);
afedecd3
MM
1028}
1029
1030/* Print the instruction number range for a function call history line. */
1031
1032static void
23a7fe75
MM
1033btrace_call_history_insn_range (struct ui_out *uiout,
1034 const struct btrace_function *bfun)
afedecd3 1035{
7acbe133
MM
1036 unsigned int begin, end, size;
1037
0860c437 1038 size = bfun->insn.size ();
7acbe133 1039 gdb_assert (size > 0);
afedecd3 1040
23a7fe75 1041 begin = bfun->insn_offset;
7acbe133 1042 end = begin + size - 1;
afedecd3 1043
1f77b012 1044 uiout->field_unsigned ("insn begin", begin);
112e8700 1045 uiout->text (",");
1f77b012 1046 uiout->field_unsigned ("insn end", end);
afedecd3
MM
1047}
1048
ce0dfbea
MM
1049/* Compute the lowest and highest source line for the instructions in BFUN
1050 and return them in PBEGIN and PEND.
1051 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1052 result from inlining or macro expansion. */
1053
1054static void
1055btrace_compute_src_line_range (const struct btrace_function *bfun,
1056 int *pbegin, int *pend)
1057{
ce0dfbea
MM
1058 struct symtab *symtab;
1059 struct symbol *sym;
ce0dfbea
MM
1060 int begin, end;
1061
1062 begin = INT_MAX;
1063 end = INT_MIN;
1064
1065 sym = bfun->sym;
1066 if (sym == NULL)
1067 goto out;
1068
1069 symtab = symbol_symtab (sym);
1070
0860c437 1071 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1072 {
1073 struct symtab_and_line sal;
1074
0860c437 1075 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1076 if (sal.symtab != symtab || sal.line == 0)
1077 continue;
1078
325fac50
PA
1079 begin = std::min (begin, sal.line);
1080 end = std::max (end, sal.line);
ce0dfbea
MM
1081 }
1082
1083 out:
1084 *pbegin = begin;
1085 *pend = end;
1086}
1087
afedecd3
MM
1088/* Print the source line information for a function call history line. */
1089
1090static void
23a7fe75
MM
1091btrace_call_history_src_line (struct ui_out *uiout,
1092 const struct btrace_function *bfun)
afedecd3
MM
1093{
1094 struct symbol *sym;
23a7fe75 1095 int begin, end;
afedecd3
MM
1096
1097 sym = bfun->sym;
1098 if (sym == NULL)
1099 return;
1100
112e8700 1101 uiout->field_string ("file",
cbe56571 1102 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1103 file_name_style.style ());
afedecd3 1104
ce0dfbea 1105 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1106 if (end < begin)
afedecd3
MM
1107 return;
1108
112e8700 1109 uiout->text (":");
381befee 1110 uiout->field_signed ("min line", begin);
afedecd3 1111
23a7fe75 1112 if (end == begin)
afedecd3
MM
1113 return;
1114
112e8700 1115 uiout->text (",");
381befee 1116 uiout->field_signed ("max line", end);
afedecd3
MM
1117}
1118
0b722aec
MM
1119/* Get the name of a branch trace function. */
1120
1121static const char *
1122btrace_get_bfun_name (const struct btrace_function *bfun)
1123{
1124 struct minimal_symbol *msym;
1125 struct symbol *sym;
1126
1127 if (bfun == NULL)
1128 return "??";
1129
1130 msym = bfun->msym;
1131 sym = bfun->sym;
1132
1133 if (sym != NULL)
987012b8 1134 return sym->print_name ();
0b722aec 1135 else if (msym != NULL)
c9d95fa3 1136 return msym->print_name ();
0b722aec
MM
1137 else
1138 return "??";
1139}
1140
afedecd3
MM
1141/* Disassemble a section of the recorded function trace. */
1142
1143static void
23a7fe75 1144btrace_call_history (struct ui_out *uiout,
8710b709 1145 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1146 const struct btrace_call_iterator *begin,
1147 const struct btrace_call_iterator *end,
8d297bbf 1148 int int_flags)
afedecd3 1149{
23a7fe75 1150 struct btrace_call_iterator it;
8d297bbf 1151 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1152
8d297bbf 1153 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1154 btrace_call_number (end));
afedecd3 1155
23a7fe75 1156 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1157 {
23a7fe75
MM
1158 const struct btrace_function *bfun;
1159 struct minimal_symbol *msym;
1160 struct symbol *sym;
1161
1162 bfun = btrace_call_get (&it);
23a7fe75 1163 sym = bfun->sym;
0b722aec 1164 msym = bfun->msym;
23a7fe75 1165
afedecd3 1166 /* Print the function index. */
1f77b012 1167 uiout->field_unsigned ("index", bfun->number);
112e8700 1168 uiout->text ("\t");
afedecd3 1169
31fd9caa
MM
1170 /* Indicate gaps in the trace. */
1171 if (bfun->errcode != 0)
1172 {
1173 const struct btrace_config *conf;
1174
1175 conf = btrace_conf (btinfo);
1176
1177 /* We have trace so we must have a configuration. */
1178 gdb_assert (conf != NULL);
1179
1180 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1181
1182 continue;
1183 }
1184
8710b709
MM
1185 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1186 {
1187 int level = bfun->level + btinfo->level, i;
1188
1189 for (i = 0; i < level; ++i)
112e8700 1190 uiout->text (" ");
8710b709
MM
1191 }
1192
1193 if (sym != NULL)
987012b8 1194 uiout->field_string ("function", sym->print_name (),
e43b10e1 1195 function_name_style.style ());
8710b709 1196 else if (msym != NULL)
c9d95fa3 1197 uiout->field_string ("function", msym->print_name (),
e43b10e1 1198 function_name_style.style ());
112e8700 1199 else if (!uiout->is_mi_like_p ())
cbe56571 1200 uiout->field_string ("function", "??",
e43b10e1 1201 function_name_style.style ());
8710b709 1202
1e038f67 1203 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1204 {
112e8700 1205 uiout->text (_("\tinst "));
23a7fe75 1206 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1207 }
1208
1e038f67 1209 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1210 {
112e8700 1211 uiout->text (_("\tat "));
23a7fe75 1212 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1213 }
1214
112e8700 1215 uiout->text ("\n");
afedecd3
MM
1216 }
1217}
1218
f6ac5f3d 1219/* The call_history method of target record-btrace. */
afedecd3 1220
f6ac5f3d
PA
1221void
1222record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1223{
1224 struct btrace_thread_info *btinfo;
23a7fe75
MM
1225 struct btrace_call_history *history;
1226 struct btrace_call_iterator begin, end;
afedecd3 1227 struct ui_out *uiout;
23a7fe75 1228 unsigned int context, covered;
afedecd3
MM
1229
1230 uiout = current_uiout;
2e783024 1231 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1232 context = abs (size);
afedecd3
MM
1233 if (context == 0)
1234 error (_("Bad record function-call-history-size."));
1235
23a7fe75
MM
1236 btinfo = require_btrace ();
1237 history = btinfo->call_history;
1238 if (history == NULL)
afedecd3 1239 {
07bbe694 1240 struct btrace_insn_iterator *replay;
afedecd3 1241
0cb7c7b0 1242 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1243
07bbe694
MM
1244 /* If we're replaying, we start at the replay position. Otherwise, we
1245 start at the tail of the trace. */
1246 replay = btinfo->replay;
1247 if (replay != NULL)
1248 {
07bbe694 1249 begin.btinfo = btinfo;
a0f1b963 1250 begin.index = replay->call_index;
07bbe694
MM
1251 }
1252 else
1253 btrace_call_end (&begin, btinfo);
1254
1255 /* We start from here and expand in the requested direction. Then we
1256 expand in the other direction, as well, to fill up any remaining
1257 context. */
1258 end = begin;
1259 if (size < 0)
1260 {
1261 /* We want the current position covered, as well. */
1262 covered = btrace_call_next (&end, 1);
1263 covered += btrace_call_prev (&begin, context - covered);
1264 covered += btrace_call_next (&end, context - covered);
1265 }
1266 else
1267 {
1268 covered = btrace_call_next (&end, context);
1269 covered += btrace_call_prev (&begin, context- covered);
1270 }
afedecd3
MM
1271 }
1272 else
1273 {
23a7fe75
MM
1274 begin = history->begin;
1275 end = history->end;
afedecd3 1276
0cb7c7b0 1277 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1278 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1279
23a7fe75
MM
1280 if (size < 0)
1281 {
1282 end = begin;
1283 covered = btrace_call_prev (&begin, context);
1284 }
1285 else
1286 {
1287 begin = end;
1288 covered = btrace_call_next (&end, context);
1289 }
afedecd3
MM
1290 }
1291
23a7fe75 1292 if (covered > 0)
8710b709 1293 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1294 else
1295 {
1296 if (size < 0)
1297 printf_unfiltered (_("At the start of the branch trace record.\n"));
1298 else
1299 printf_unfiltered (_("At the end of the branch trace record.\n"));
1300 }
afedecd3 1301
23a7fe75 1302 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1303}
1304
f6ac5f3d 1305/* The call_history_range method of target record-btrace. */
afedecd3 1306
f6ac5f3d
PA
1307void
1308record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1309 record_print_flags flags)
afedecd3
MM
1310{
1311 struct btrace_thread_info *btinfo;
23a7fe75 1312 struct btrace_call_iterator begin, end;
afedecd3 1313 struct ui_out *uiout;
23a7fe75
MM
1314 unsigned int low, high;
1315 int found;
afedecd3
MM
1316
1317 uiout = current_uiout;
2e783024 1318 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1319 low = from;
1320 high = to;
afedecd3 1321
0cb7c7b0 1322 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1323
1324 /* Check for wrap-arounds. */
23a7fe75 1325 if (low != from || high != to)
afedecd3
MM
1326 error (_("Bad range."));
1327
0688d04e 1328 if (high < low)
afedecd3
MM
1329 error (_("Bad range."));
1330
23a7fe75 1331 btinfo = require_btrace ();
afedecd3 1332
23a7fe75
MM
1333 found = btrace_find_call_by_number (&begin, btinfo, low);
1334 if (found == 0)
1335 error (_("Range out of bounds."));
afedecd3 1336
23a7fe75
MM
1337 found = btrace_find_call_by_number (&end, btinfo, high);
1338 if (found == 0)
0688d04e
MM
1339 {
1340 /* Silently truncate the range. */
1341 btrace_call_end (&end, btinfo);
1342 }
1343 else
1344 {
1345 /* We want both begin and end to be inclusive. */
1346 btrace_call_next (&end, 1);
1347 }
afedecd3 1348
8710b709 1349 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1350 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1351}
1352
f6ac5f3d 1353/* The call_history_from method of target record-btrace. */
afedecd3 1354
f6ac5f3d
PA
1355void
1356record_btrace_target::call_history_from (ULONGEST from, int size,
1357 record_print_flags flags)
afedecd3
MM
1358{
1359 ULONGEST begin, end, context;
1360
1361 context = abs (size);
0688d04e
MM
1362 if (context == 0)
1363 error (_("Bad record function-call-history-size."));
afedecd3
MM
1364
1365 if (size < 0)
1366 {
1367 end = from;
1368
1369 if (from < context)
1370 begin = 0;
1371 else
0688d04e 1372 begin = from - context + 1;
afedecd3
MM
1373 }
1374 else
1375 {
1376 begin = from;
0688d04e 1377 end = from + context - 1;
afedecd3
MM
1378
1379 /* Check for wrap-around. */
1380 if (end < begin)
1381 end = ULONGEST_MAX;
1382 }
1383
f6ac5f3d 1384 call_history_range ( begin, end, flags);
afedecd3
MM
1385}
1386
f6ac5f3d 1387/* The record_method method of target record-btrace. */
b158a20f 1388
f6ac5f3d
PA
1389enum record_method
1390record_btrace_target::record_method (ptid_t ptid)
b158a20f 1391{
5b6d1e4f
PA
1392 process_stratum_target *proc_target = current_inferior ()->process_target ();
1393 thread_info *const tp = find_thread_ptid (proc_target, ptid);
b158a20f
TW
1394
1395 if (tp == NULL)
1396 error (_("No thread."));
1397
1398 if (tp->btrace.target == NULL)
1399 return RECORD_METHOD_NONE;
1400
1401 return RECORD_METHOD_BTRACE;
1402}
1403
f6ac5f3d 1404/* The record_is_replaying method of target record-btrace. */
07bbe694 1405
57810aa7 1406bool
f6ac5f3d 1407record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1408{
5b6d1e4f
PA
1409 process_stratum_target *proc_target = current_inferior ()->process_target ();
1410 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 1411 if (btrace_is_replaying (tp))
57810aa7 1412 return true;
07bbe694 1413
57810aa7 1414 return false;
07bbe694
MM
1415}
1416
f6ac5f3d 1417/* The record_will_replay method of target record-btrace. */
7ff27e9b 1418
57810aa7 1419bool
f6ac5f3d 1420record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1421{
f6ac5f3d 1422 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1423}
1424
f6ac5f3d 1425/* The xfer_partial method of target record-btrace. */
633785ff 1426
f6ac5f3d
PA
1427enum target_xfer_status
1428record_btrace_target::xfer_partial (enum target_object object,
1429 const char *annex, gdb_byte *readbuf,
1430 const gdb_byte *writebuf, ULONGEST offset,
1431 ULONGEST len, ULONGEST *xfered_len)
633785ff 1432{
633785ff 1433 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1434 if (replay_memory_access == replay_memory_access_read_only
aef92902 1435 && !record_btrace_generating_corefile
f6ac5f3d 1436 && record_is_replaying (inferior_ptid))
633785ff
MM
1437 {
1438 switch (object)
1439 {
1440 case TARGET_OBJECT_MEMORY:
1441 {
19cf757a 1442 const struct target_section *section;
633785ff
MM
1443
1444 /* We do not allow writing memory in general. */
1445 if (writebuf != NULL)
9b409511
YQ
1446 {
1447 *xfered_len = len;
bc113b4e 1448 return TARGET_XFER_UNAVAILABLE;
9b409511 1449 }
633785ff
MM
1450
1451 /* We allow reading readonly memory. */
f6ac5f3d 1452 section = target_section_by_addr (this, offset);
633785ff
MM
1453 if (section != NULL)
1454 {
1455 /* Check if the section we found is readonly. */
fd361982 1456 if ((bfd_section_flags (section->the_bfd_section)
633785ff
MM
1457 & SEC_READONLY) != 0)
1458 {
1459 /* Truncate the request to fit into this section. */
325fac50 1460 len = std::min (len, section->endaddr - offset);
633785ff
MM
1461 break;
1462 }
1463 }
1464
9b409511 1465 *xfered_len = len;
bc113b4e 1466 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1467 }
1468 }
1469 }
1470
1471 /* Forward the request. */
b6a8c27b
PA
1472 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1473 offset, len, xfered_len);
633785ff
MM
1474}
1475
f6ac5f3d 1476/* The insert_breakpoint method of target record-btrace. */
633785ff 1477
f6ac5f3d
PA
1478int
1479record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1480 struct bp_target_info *bp_tgt)
633785ff 1481{
67b5c0c1
MM
1482 const char *old;
1483 int ret;
633785ff
MM
1484
1485 /* Inserting breakpoints requires accessing memory. Allow it for the
1486 duration of this function. */
67b5c0c1
MM
1487 old = replay_memory_access;
1488 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1489
1490 ret = 0;
a70b8144 1491 try
492d29ea 1492 {
b6a8c27b 1493 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1494 }
230d2906 1495 catch (const gdb_exception &except)
492d29ea 1496 {
6c63c96a 1497 replay_memory_access = old;
eedc3f4f 1498 throw;
492d29ea 1499 }
6c63c96a 1500 replay_memory_access = old;
633785ff
MM
1501
1502 return ret;
1503}
1504
f6ac5f3d 1505/* The remove_breakpoint method of target record-btrace. */
633785ff 1506
f6ac5f3d
PA
1507int
1508record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1509 struct bp_target_info *bp_tgt,
1510 enum remove_bp_reason reason)
633785ff 1511{
67b5c0c1
MM
1512 const char *old;
1513 int ret;
633785ff
MM
1514
1515 /* Removing breakpoints requires accessing memory. Allow it for the
1516 duration of this function. */
67b5c0c1
MM
1517 old = replay_memory_access;
1518 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1519
1520 ret = 0;
a70b8144 1521 try
492d29ea 1522 {
b6a8c27b 1523 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1524 }
230d2906 1525 catch (const gdb_exception &except)
492d29ea 1526 {
6c63c96a 1527 replay_memory_access = old;
eedc3f4f 1528 throw;
492d29ea 1529 }
6c63c96a 1530 replay_memory_access = old;
633785ff
MM
1531
1532 return ret;
1533}
1534
f6ac5f3d 1535/* The fetch_registers method of target record-btrace. */
1f3ef581 1536
f6ac5f3d
PA
1537void
1538record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581 1539{
1a476b6d
MM
1540 btrace_insn_iterator *replay = nullptr;
1541
1542 /* Thread-db may ask for a thread's registers before GDB knows about the
1543 thread. We forward the request to the target beneath in this
1544 case. */
5b6d1e4f 1545 thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
1a476b6d
MM
1546 if (tp != nullptr)
1547 replay = tp->btrace.replay;
1f3ef581 1548
1a476b6d 1549 if (replay != nullptr && !record_btrace_generating_corefile)
1f3ef581
MM
1550 {
1551 const struct btrace_insn *insn;
1552 struct gdbarch *gdbarch;
1553 int pcreg;
1554
ac7936df 1555 gdbarch = regcache->arch ();
1f3ef581
MM
1556 pcreg = gdbarch_pc_regnum (gdbarch);
1557 if (pcreg < 0)
1558 return;
1559
1560 /* We can only provide the PC register. */
1561 if (regno >= 0 && regno != pcreg)
1562 return;
1563
1564 insn = btrace_insn_get (replay);
1565 gdb_assert (insn != NULL);
1566
73e1c03f 1567 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1568 }
1569 else
b6a8c27b 1570 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1571}
1572
f6ac5f3d 1573/* The store_registers method of target record-btrace. */
1f3ef581 1574
f6ac5f3d
PA
1575void
1576record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1577{
a52eab48 1578 if (!record_btrace_generating_corefile
222312d3 1579 && record_is_replaying (regcache->ptid ()))
4d10e986 1580 error (_("Cannot write registers while replaying."));
1f3ef581 1581
491144b5 1582 gdb_assert (may_write_registers);
1f3ef581 1583
b6a8c27b 1584 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1585}
1586
f6ac5f3d 1587/* The prepare_to_store method of target record-btrace. */
1f3ef581 1588
f6ac5f3d
PA
1589void
1590record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1591{
a52eab48 1592 if (!record_btrace_generating_corefile
222312d3 1593 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1594 return;
1595
b6a8c27b 1596 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1597}
1598
0b722aec
MM
1599/* The branch trace frame cache. */
1600
1601struct btrace_frame_cache
1602{
1603 /* The thread. */
1604 struct thread_info *tp;
1605
1606 /* The frame info. */
1607 struct frame_info *frame;
1608
1609 /* The branch trace function segment. */
1610 const struct btrace_function *bfun;
1611};
1612
1613/* A struct btrace_frame_cache hash table indexed by NEXT. */
1614
1615static htab_t bfcache;
1616
1617/* hash_f for htab_create_alloc of bfcache. */
1618
1619static hashval_t
1620bfcache_hash (const void *arg)
1621{
19ba03f4
SM
1622 const struct btrace_frame_cache *cache
1623 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1624
1625 return htab_hash_pointer (cache->frame);
1626}
1627
1628/* eq_f for htab_create_alloc of bfcache. */
1629
1630static int
1631bfcache_eq (const void *arg1, const void *arg2)
1632{
19ba03f4
SM
1633 const struct btrace_frame_cache *cache1
1634 = (const struct btrace_frame_cache *) arg1;
1635 const struct btrace_frame_cache *cache2
1636 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1637
1638 return cache1->frame == cache2->frame;
1639}
1640
1641/* Create a new btrace frame cache. */
1642
1643static struct btrace_frame_cache *
1644bfcache_new (struct frame_info *frame)
1645{
1646 struct btrace_frame_cache *cache;
1647 void **slot;
1648
1649 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1650 cache->frame = frame;
1651
1652 slot = htab_find_slot (bfcache, cache, INSERT);
1653 gdb_assert (*slot == NULL);
1654 *slot = cache;
1655
1656 return cache;
1657}
1658
1659/* Extract the branch trace function from a branch trace frame. */
1660
1661static const struct btrace_function *
1662btrace_get_frame_function (struct frame_info *frame)
1663{
1664 const struct btrace_frame_cache *cache;
0b722aec
MM
1665 struct btrace_frame_cache pattern;
1666 void **slot;
1667
1668 pattern.frame = frame;
1669
1670 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1671 if (slot == NULL)
1672 return NULL;
1673
19ba03f4 1674 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1675 return cache->bfun;
1676}
1677
cecac1ab
MM
1678/* Implement stop_reason method for record_btrace_frame_unwind. */
1679
1680static enum unwind_stop_reason
1681record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1682 void **this_cache)
1683{
0b722aec
MM
1684 const struct btrace_frame_cache *cache;
1685 const struct btrace_function *bfun;
1686
19ba03f4 1687 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1688 bfun = cache->bfun;
1689 gdb_assert (bfun != NULL);
1690
42bfe59e 1691 if (bfun->up == 0)
0b722aec
MM
1692 return UNWIND_UNAVAILABLE;
1693
1694 return UNWIND_NO_REASON;
cecac1ab
MM
1695}
1696
1697/* Implement this_id method for record_btrace_frame_unwind. */
1698
1699static void
1700record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1701 struct frame_id *this_id)
1702{
0b722aec
MM
1703 const struct btrace_frame_cache *cache;
1704 const struct btrace_function *bfun;
4aeb0dfc 1705 struct btrace_call_iterator it;
0b722aec
MM
1706 CORE_ADDR code, special;
1707
19ba03f4 1708 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1709
1710 bfun = cache->bfun;
1711 gdb_assert (bfun != NULL);
1712
4aeb0dfc
TW
1713 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1714 bfun = btrace_call_get (&it);
0b722aec
MM
1715
1716 code = get_frame_func (this_frame);
1717 special = bfun->number;
1718
1719 *this_id = frame_id_build_unavailable_stack_special (code, special);
1720
1721 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1722 btrace_get_bfun_name (cache->bfun),
1723 core_addr_to_string_nz (this_id->code_addr),
1724 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1725}
1726
1727/* Implement prev_register method for record_btrace_frame_unwind. */
1728
1729static struct value *
1730record_btrace_frame_prev_register (struct frame_info *this_frame,
1731 void **this_cache,
1732 int regnum)
1733{
0b722aec
MM
1734 const struct btrace_frame_cache *cache;
1735 const struct btrace_function *bfun, *caller;
42bfe59e 1736 struct btrace_call_iterator it;
0b722aec
MM
1737 struct gdbarch *gdbarch;
1738 CORE_ADDR pc;
1739 int pcreg;
1740
1741 gdbarch = get_frame_arch (this_frame);
1742 pcreg = gdbarch_pc_regnum (gdbarch);
1743 if (pcreg < 0 || regnum != pcreg)
1744 throw_error (NOT_AVAILABLE_ERROR,
1745 _("Registers are not available in btrace record history"));
1746
19ba03f4 1747 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1748 bfun = cache->bfun;
1749 gdb_assert (bfun != NULL);
1750
42bfe59e 1751 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1752 throw_error (NOT_AVAILABLE_ERROR,
1753 _("No caller in btrace record history"));
1754
42bfe59e
TW
1755 caller = btrace_call_get (&it);
1756
0b722aec 1757 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1758 pc = caller->insn.front ().pc;
0b722aec
MM
1759 else
1760 {
0860c437 1761 pc = caller->insn.back ().pc;
0b722aec
MM
1762 pc += gdb_insn_length (gdbarch, pc);
1763 }
1764
1765 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1766 btrace_get_bfun_name (bfun), bfun->level,
1767 core_addr_to_string_nz (pc));
1768
1769 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1770}
1771
1772/* Implement sniffer method for record_btrace_frame_unwind. */
1773
1774static int
1775record_btrace_frame_sniffer (const struct frame_unwind *self,
1776 struct frame_info *this_frame,
1777 void **this_cache)
1778{
0b722aec
MM
1779 const struct btrace_function *bfun;
1780 struct btrace_frame_cache *cache;
cecac1ab 1781 struct thread_info *tp;
0b722aec 1782 struct frame_info *next;
cecac1ab
MM
1783
1784 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1785 tp = inferior_thread ();
cecac1ab 1786
0b722aec
MM
1787 bfun = NULL;
1788 next = get_next_frame (this_frame);
1789 if (next == NULL)
1790 {
1791 const struct btrace_insn_iterator *replay;
1792
1793 replay = tp->btrace.replay;
1794 if (replay != NULL)
08c3f6d2 1795 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1796 }
1797 else
1798 {
1799 const struct btrace_function *callee;
42bfe59e 1800 struct btrace_call_iterator it;
0b722aec
MM
1801
1802 callee = btrace_get_frame_function (next);
42bfe59e
TW
1803 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1804 return 0;
1805
1806 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1807 return 0;
1808
1809 bfun = btrace_call_get (&it);
0b722aec
MM
1810 }
1811
1812 if (bfun == NULL)
1813 return 0;
1814
1815 DEBUG ("[frame] sniffed frame for %s on level %d",
1816 btrace_get_bfun_name (bfun), bfun->level);
1817
1818 /* This is our frame. Initialize the frame cache. */
1819 cache = bfcache_new (this_frame);
1820 cache->tp = tp;
1821 cache->bfun = bfun;
1822
1823 *this_cache = cache;
1824 return 1;
1825}
1826
1827/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1828
1829static int
1830record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1831 struct frame_info *this_frame,
1832 void **this_cache)
1833{
1834 const struct btrace_function *bfun, *callee;
1835 struct btrace_frame_cache *cache;
42bfe59e 1836 struct btrace_call_iterator it;
0b722aec 1837 struct frame_info *next;
42bfe59e 1838 struct thread_info *tinfo;
0b722aec
MM
1839
1840 next = get_next_frame (this_frame);
1841 if (next == NULL)
1842 return 0;
1843
1844 callee = btrace_get_frame_function (next);
1845 if (callee == NULL)
1846 return 0;
1847
1848 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1849 return 0;
1850
00431a78 1851 tinfo = inferior_thread ();
42bfe59e 1852 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1853 return 0;
1854
42bfe59e
TW
1855 bfun = btrace_call_get (&it);
1856
0b722aec
MM
1857 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1858 btrace_get_bfun_name (bfun), bfun->level);
1859
1860 /* This is our frame. Initialize the frame cache. */
1861 cache = bfcache_new (this_frame);
42bfe59e 1862 cache->tp = tinfo;
0b722aec
MM
1863 cache->bfun = bfun;
1864
1865 *this_cache = cache;
1866 return 1;
1867}
1868
1869static void
1870record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1871{
1872 struct btrace_frame_cache *cache;
1873 void **slot;
1874
19ba03f4 1875 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1876
1877 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1878 gdb_assert (slot != NULL);
1879
1880 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1881}
1882
1883/* btrace recording does not store previous memory content, neither the stack
30baf67b 1884 frames content. Any unwinding would return erroneous results as the stack
cecac1ab
MM
1885 contents no longer matches the changed PC value restored from history.
1886 Therefore this unwinder reports any possibly unwound registers as
1887 <unavailable>. */
1888
0b722aec 1889const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1890{
1891 NORMAL_FRAME,
1892 record_btrace_frame_unwind_stop_reason,
1893 record_btrace_frame_this_id,
1894 record_btrace_frame_prev_register,
1895 NULL,
0b722aec
MM
1896 record_btrace_frame_sniffer,
1897 record_btrace_frame_dealloc_cache
1898};
1899
1900const struct frame_unwind record_btrace_tailcall_frame_unwind =
1901{
1902 TAILCALL_FRAME,
1903 record_btrace_frame_unwind_stop_reason,
1904 record_btrace_frame_this_id,
1905 record_btrace_frame_prev_register,
1906 NULL,
1907 record_btrace_tailcall_frame_sniffer,
1908 record_btrace_frame_dealloc_cache
cecac1ab 1909};
b2f4cfde 1910
f6ac5f3d 1911/* Implement the get_unwinder method. */
ac01945b 1912
f6ac5f3d
PA
1913const struct frame_unwind *
1914record_btrace_target::get_unwinder ()
ac01945b
TT
1915{
1916 return &record_btrace_frame_unwind;
1917}
1918
f6ac5f3d 1919/* Implement the get_tailcall_unwinder method. */
ac01945b 1920
f6ac5f3d
PA
1921const struct frame_unwind *
1922record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1923{
1924 return &record_btrace_tailcall_frame_unwind;
1925}
1926
987e68b1
MM
1927/* Return a human-readable string for FLAG. */
1928
1929static const char *
04902b09 1930btrace_thread_flag_to_str (btrace_thread_flags flag)
987e68b1
MM
1931{
1932 switch (flag)
1933 {
1934 case BTHR_STEP:
1935 return "step";
1936
1937 case BTHR_RSTEP:
1938 return "reverse-step";
1939
1940 case BTHR_CONT:
1941 return "cont";
1942
1943 case BTHR_RCONT:
1944 return "reverse-cont";
1945
1946 case BTHR_STOP:
1947 return "stop";
1948 }
1949
1950 return "<invalid>";
1951}
1952
52834460
MM
1953/* Indicate that TP should be resumed according to FLAG. */
1954
1955static void
1956record_btrace_resume_thread (struct thread_info *tp,
1957 enum btrace_thread_flag flag)
1958{
1959 struct btrace_thread_info *btinfo;
1960
43792cf0 1961 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1962 target_pid_to_str (tp->ptid).c_str (), flag,
1963 btrace_thread_flag_to_str (flag));
52834460
MM
1964
1965 btinfo = &tp->btrace;
1966
52834460 1967 /* Fetch the latest branch trace. */
4a4495d6 1968 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1969
0ca912df
MM
1970 /* A resume request overwrites a preceding resume or stop request. */
1971 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1972 btinfo->flags |= flag;
1973}
1974
ec71cc2f
MM
1975/* Get the current frame for TP. */
1976
79b8d3b0
TT
1977static struct frame_id
1978get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1979{
79b8d3b0 1980 struct frame_id id;
719546c4 1981 bool executing;
ec71cc2f 1982
00431a78
PA
1983 /* Set current thread, which is implicitly used by
1984 get_current_frame. */
1985 scoped_restore_current_thread restore_thread;
1986
1987 switch_to_thread (tp);
ec71cc2f 1988
5b6d1e4f
PA
1989 process_stratum_target *proc_target = tp->inf->process_target ();
1990
ec71cc2f
MM
1991 /* Clear the executing flag to allow changes to the current frame.
1992 We are not actually running, yet. We just started a reverse execution
1993 command or a record goto command.
1994 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1995 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f 1996 move the thread. Since we need to recompute the stack, we temporarily
85102364 1997 set EXECUTING to false. */
00431a78 1998 executing = tp->executing;
5b6d1e4f 1999 set_executing (proc_target, inferior_ptid, false);
ec71cc2f 2000
79b8d3b0 2001 id = null_frame_id;
a70b8144 2002 try
ec71cc2f 2003 {
79b8d3b0 2004 id = get_frame_id (get_current_frame ());
ec71cc2f 2005 }
230d2906 2006 catch (const gdb_exception &except)
ec71cc2f
MM
2007 {
2008 /* Restore the previous execution state. */
5b6d1e4f 2009 set_executing (proc_target, inferior_ptid, executing);
ec71cc2f 2010
eedc3f4f 2011 throw;
ec71cc2f 2012 }
ec71cc2f
MM
2013
2014 /* Restore the previous execution state. */
5b6d1e4f 2015 set_executing (proc_target, inferior_ptid, executing);
ec71cc2f 2016
79b8d3b0 2017 return id;
ec71cc2f
MM
2018}
2019
52834460
MM
/* Start replaying a thread.

   Allocates and installs the replay iterator in TP's btrace info and
   positions it at the end of the recorded trace (i.e. at the current
   instruction).  Returns the iterator, or NULL if TP has no trace.
   On error, replay state is rolled back and the exception re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Roll back: discard the iterator (btinfo->replay may or may not
	 have been installed yet; xfree of the same pointer covers both)
	 and drop any registers read via the btrace unwinder.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2097
2098/* Stop replaying a thread. */
2099
2100static void
2101record_btrace_stop_replaying (struct thread_info *tp)
2102{
2103 struct btrace_thread_info *btinfo;
2104
2105 btinfo = &tp->btrace;
2106
2107 xfree (btinfo->replay);
2108 btinfo->replay = NULL;
2109
2110 /* Make sure we're not leaving any stale registers. */
00431a78 2111 registers_changed_thread (tp);
52834460
MM
2112}
2113
e3cfc1c7
MM
2114/* Stop replaying TP if it is at the end of its execution history. */
2115
2116static void
2117record_btrace_stop_replaying_at_end (struct thread_info *tp)
2118{
2119 struct btrace_insn_iterator *replay, end;
2120 struct btrace_thread_info *btinfo;
2121
2122 btinfo = &tp->btrace;
2123 replay = btinfo->replay;
2124
2125 if (replay == NULL)
2126 return;
2127
2128 btrace_insn_end (&end, btinfo);
2129
2130 if (btrace_insn_cmp (replay, &end) == 0)
2131 record_btrace_stop_replaying (tp);
2132}
2133
/* The resume method of target record-btrace.

   While replaying (or when resuming in reverse), we do not actually run
   the inferior; we only record the intended move as BTHR_* flags on each
   thread.  The stepping itself happens in record_btrace_target::wait.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is used
     for the thread(s) actually being stepped, CFLAG for threads that are
     merely continued alongside.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2207
987e68b1
MM
2208/* Cancel resuming TP. */
2209
2210static void
2211record_btrace_cancel_resume (struct thread_info *tp)
2212{
04902b09 2213 btrace_thread_flags flags;
987e68b1
MM
2214
2215 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2216 if (flags == 0)
2217 return;
2218
43792cf0
PA
2219 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2220 print_thread_id (tp),
04902b09 2221 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
987e68b1
MM
2222 btrace_thread_flag_to_str (flags));
2223
2224 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2225 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2226}
2227
2228/* Return a target_waitstatus indicating that we ran out of history. */
2229
2230static struct target_waitstatus
2231btrace_step_no_history (void)
2232{
2233 struct target_waitstatus status;
2234
2235 status.kind = TARGET_WAITKIND_NO_HISTORY;
2236
2237 return status;
2238}
2239
2240/* Return a target_waitstatus indicating that a step finished. */
2241
2242static struct target_waitstatus
2243btrace_step_stopped (void)
2244{
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_TRAP;
2249
2250 return status;
2251}
2252
6e4879f0
MM
2253/* Return a target_waitstatus indicating that a thread was stopped as
2254 requested. */
2255
2256static struct target_waitstatus
2257btrace_step_stopped_on_request (void)
2258{
2259 struct target_waitstatus status;
2260
2261 status.kind = TARGET_WAITKIND_STOPPED;
2262 status.value.sig = GDB_SIGNAL_0;
2263
2264 return status;
2265}
2266
d825d248
MM
2267/* Return a target_waitstatus indicating a spurious stop. */
2268
2269static struct target_waitstatus
2270btrace_step_spurious (void)
2271{
2272 struct target_waitstatus status;
2273
2274 status.kind = TARGET_WAITKIND_SPURIOUS;
2275
2276 return status;
2277}
2278
e3cfc1c7
MM
2279/* Return a target_waitstatus indicating that the thread was not resumed. */
2280
2281static struct target_waitstatus
2282btrace_step_no_resumed (void)
2283{
2284 struct target_waitstatus status;
2285
2286 status.kind = TARGET_WAITKIND_NO_RESUMED;
2287
2288 return status;
2289}
2290
2291/* Return a target_waitstatus indicating that we should wait again. */
2292
2293static struct target_waitstatus
2294btrace_step_again (void)
2295{
2296 struct target_waitstatus status;
2297
2298 status.kind = TARGET_WAITKIND_IGNORE;
2299
2300 return status;
2301}
2302
52834460
MM
2303/* Clear the record histories. */
2304
2305static void
2306record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2307{
2308 xfree (btinfo->insn_history);
2309 xfree (btinfo->call_history);
2310
2311 btinfo->insn_history = NULL;
2312 btinfo->call_history = NULL;
2313}
2314
3c615f99
MM
2315/* Check whether TP's current replay position is at a breakpoint. */
2316
2317static int
2318record_btrace_replay_at_breakpoint (struct thread_info *tp)
2319{
2320 struct btrace_insn_iterator *replay;
2321 struct btrace_thread_info *btinfo;
2322 const struct btrace_insn *insn;
3c615f99
MM
2323
2324 btinfo = &tp->btrace;
2325 replay = btinfo->replay;
2326
2327 if (replay == NULL)
2328 return 0;
2329
2330 insn = btrace_insn_get (replay);
2331 if (insn == NULL)
2332 return 0;
2333
00431a78 2334 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2335 &btinfo->stop_reason);
2336}
2337
/* Step one instruction in forward direction.

   Returns STOPPED if the step hit a breakpoint, NO_HISTORY if we are at
   (or stepped into) the end of the trace, and SPURIOUS if the step
   completed without an event.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the position we started from; the iterator points at
	     a gap and must not be left there.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2386
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying yet.  Returns STOPPED if the
   step hit a breakpoint, NO_HISTORY at the beginning of the trace, and
   SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2433
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE/BTHR_STOP request and performs one
   single step (or stop).  For continue requests that did not yet produce
   an event, the request is re-armed and IGNORE is returned so the caller
   loops.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  btrace_thread_flags flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request; it is consumed by this step.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the continue request and ask the caller to step again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2502
a6b5be76
MM
2503/* Announce further events if necessary. */
2504
2505static void
53127008
SM
2506record_btrace_maybe_mark_async_event
2507 (const std::vector<thread_info *> &moving,
2508 const std::vector<thread_info *> &no_history)
a6b5be76 2509{
53127008
SM
2510 bool more_moving = !moving.empty ();
2511 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2512
2513 if (!more_moving && !more_no_history)
2514 return;
2515
2516 if (more_moving)
2517 DEBUG ("movers pending");
2518
2519 if (more_no_history)
2520 DEBUG ("no-history pending");
2521
2522 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2523}
2524
/* The wait method of target record-btrace.

   While replaying, repeatedly single-steps all threads with pending move
   requests until one of them reports an event, then stops the others and
   reports that event.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    target_wait_flags options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  /* Clear this, if needed we'll re-mark it below.  */
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (),
	 (unsigned) options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
	     target_waitstatus_to_string (status).c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : current_inferior ()->non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid).c_str (),
	 target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
}
2653
f6ac5f3d 2654/* The stop method of target record-btrace. */
6e4879f0 2655
f6ac5f3d
PA
2656void
2657record_btrace_target::stop (ptid_t ptid)
6e4879f0 2658{
a068643d 2659 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2660
2661 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2662 if ((::execution_direction != EXEC_REVERSE)
2663 && !record_is_replaying (minus_one_ptid))
6e4879f0 2664 {
b6a8c27b 2665 this->beneath ()->stop (ptid);
6e4879f0
MM
2666 }
2667 else
2668 {
5b6d1e4f
PA
2669 process_stratum_target *proc_target
2670 = current_inferior ()->process_target ();
2671
2672 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2673 {
2674 tp->btrace.flags &= ~BTHR_MOVE;
2675 tp->btrace.flags |= BTHR_STOP;
2676 }
6e4879f0
MM
2677 }
2678 }
2679
f6ac5f3d 2680/* The can_execute_reverse method of target record-btrace. */
52834460 2681
57810aa7 2682bool
f6ac5f3d 2683record_btrace_target::can_execute_reverse ()
52834460 2684{
57810aa7 2685 return true;
52834460
MM
2686}
2687
f6ac5f3d 2688/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2689
57810aa7 2690bool
f6ac5f3d 2691record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2692{
f6ac5f3d 2693 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2694 {
2695 struct thread_info *tp = inferior_thread ();
2696
2697 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2698 }
2699
b6a8c27b 2700 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2701}
2702
f6ac5f3d 2703/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2704 record-btrace. */
2705
57810aa7 2706bool
f6ac5f3d 2707record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2708{
f6ac5f3d 2709 if (record_is_replaying (minus_one_ptid))
57810aa7 2710 return true;
9e8915c6 2711
b6a8c27b 2712 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2713}
2714
f6ac5f3d 2715/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2716
57810aa7 2717bool
f6ac5f3d 2718record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2719{
f6ac5f3d 2720 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2721 {
2722 struct thread_info *tp = inferior_thread ();
2723
2724 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2725 }
2726
b6a8c27b 2727 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2728}
2729
f6ac5f3d 2730/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2731 record-btrace. */
2732
57810aa7 2733bool
f6ac5f3d 2734record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2735{
f6ac5f3d 2736 if (record_is_replaying (minus_one_ptid))
57810aa7 2737 return true;
52834460 2738
b6a8c27b 2739 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2740}
2741
f6ac5f3d 2742/* The update_thread_list method of target record-btrace. */
e2887aa3 2743
f6ac5f3d
PA
2744void
2745record_btrace_target::update_thread_list ()
e2887aa3 2746{
e8032dde 2747 /* We don't add or remove threads during replay. */
f6ac5f3d 2748 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2749 return;
2750
2751 /* Forward the request. */
b6a8c27b 2752 this->beneath ()->update_thread_list ();
e2887aa3
MM
2753}
2754
f6ac5f3d 2755/* The thread_alive method of target record-btrace. */
e2887aa3 2756
57810aa7 2757bool
f6ac5f3d 2758record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2759{
2760 /* We don't add or remove threads during replay. */
f6ac5f3d 2761 if (record_is_replaying (minus_one_ptid))
00431a78 2762 return true;
e2887aa3
MM
2763
2764 /* Forward the request. */
b6a8c27b 2765 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2766}
2767
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the insn/call histories, updates the cached stop PC, and
   re-prints the current frame so the UI reflects the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Begin replaying if we aren't; if we already are and the position
	 is unchanged, there is nothing to do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2799
f6ac5f3d 2800/* The goto_record_begin method of target record-btrace. */
066ce621 2801
f6ac5f3d
PA
2802void
2803record_btrace_target::goto_record_begin ()
066ce621
MM
2804{
2805 struct thread_info *tp;
2806 struct btrace_insn_iterator begin;
2807
2808 tp = require_btrace_thread ();
2809
2810 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2811
2812 /* Skip gaps at the beginning of the trace. */
2813 while (btrace_insn_get (&begin) == NULL)
2814 {
2815 unsigned int steps;
2816
2817 steps = btrace_insn_next (&begin, 1);
2818 if (steps == 0)
2819 error (_("No trace."));
2820 }
2821
066ce621 2822 record_btrace_set_replay (tp, &begin);
066ce621
MM
2823}
2824
f6ac5f3d 2825/* The goto_record_end method of target record-btrace. */
066ce621 2826
f6ac5f3d
PA
2827void
2828record_btrace_target::goto_record_end ()
066ce621
MM
2829{
2830 struct thread_info *tp;
2831
2832 tp = require_btrace_thread ();
2833
2834 record_btrace_set_replay (tp, NULL);
066ce621
MM
2835}
2836
f6ac5f3d 2837/* The goto_record method of target record-btrace. */
066ce621 2838
f6ac5f3d
PA
2839void
2840record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2841{
2842 struct thread_info *tp;
2843 struct btrace_insn_iterator it;
2844 unsigned int number;
2845 int found;
2846
2847 number = insn;
2848
2849 /* Check for wrap-arounds. */
2850 if (number != insn)
2851 error (_("Instruction number out of range."));
2852
2853 tp = require_btrace_thread ();
2854
2855 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2856
2857 /* Check if the instruction could not be found or is a gap. */
2858 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2859 error (_("No such instruction."));
2860
2861 record_btrace_set_replay (tp, &it);
066ce621
MM
2862}
2863
f6ac5f3d 2864/* The record_stop_replaying method of target record-btrace. */
797094dd 2865
f6ac5f3d
PA
2866void
2867record_btrace_target::record_stop_replaying ()
797094dd 2868{
d89edf9b 2869 for (thread_info *tp : current_inferior ()->non_exited_threads ())
797094dd
MM
2870 record_btrace_stop_replaying (tp);
2871}
2872
f6ac5f3d 2873/* The execution_direction target method. */
70ad5bff 2874
f6ac5f3d
PA
2875enum exec_direction_kind
2876record_btrace_target::execution_direction ()
70ad5bff
MM
2877{
2878 return record_btrace_resume_exec_dir;
2879}
2880
/* The prepare_to_generate_core target method.

   Sets a flag consulted elsewhere in this file so memory accesses made
   while generating a core file bypass the replay restrictions.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  record_btrace_generating_corefile = 1;
}
2888
/* The done_generating_core target method.

   Clears the flag set by prepare_to_generate_core.  */

void
record_btrace_target::done_generating_core ()
{
  record_btrace_generating_corefile = 0;
}
2896
f4abbc16
MM
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format so a failed attempt does not leak
	 into a later "record btrace" invocation.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2917
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format so a failed attempt does not leak
	 into a later "record btrace" invocation.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2938
b20a6524
MM
/* Alias for "target record".

   Tries Intel PT first; if that fails, falls back to BTS.  If both fail,
   the format is reset and the BTS error is propagated.  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* PT is unavailable; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
2968
67b5c0c1
MM
2969/* The "show record btrace replay-memory-access" command. */
2970
2971static void
2972cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2973 struct cmd_list_element *c, const char *value)
2974{
2975 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2976 replay_memory_access);
2977}
2978
4a4495d6
MM
/* The "set record btrace cpu none" command.

   Disables all CPU-specific errata workarounds during trace decode.  */

static void
cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
{
  /* This sub-command accepts no argument.  */
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_NONE;
}
2989
/* The "set record btrace cpu auto" command.

   Selects automatic detection of the CPU for trace decode.  */

static void
cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
{
  /* This sub-command accepts no argument.  */
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_AUTO;
}
3000
3001/* The "set record btrace cpu" command. */
3002
3003static void
3004cmd_set_record_btrace_cpu (const char *args, int from_tty)
3005{
3006 if (args == nullptr)
3007 args = "";
3008
3009 /* We use a hard-coded vendor string for now. */
3010 unsigned int family, model, stepping;
3011 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3012 &model, &l1, &stepping, &l2);
3013 if (matches == 3)
3014 {
3015 if (strlen (args) != l2)
3016 error (_("Trailing junk: '%s'."), args + l2);
3017 }
3018 else if (matches == 2)
3019 {
3020 if (strlen (args) != l1)
3021 error (_("Trailing junk: '%s'."), args + l1);
3022
3023 stepping = 0;
3024 }
3025 else
3026 error (_("Bad format. See \"help set record btrace cpu\"."));
3027
3028 if (USHRT_MAX < family)
3029 error (_("Cpu family too big."));
3030
3031 if (UCHAR_MAX < model)
3032 error (_("Cpu model too big."));
3033
3034 if (UCHAR_MAX < stepping)
3035 error (_("Cpu stepping too big."));
3036
3037 record_btrace_cpu.vendor = CV_INTEL;
3038 record_btrace_cpu.family = family;
3039 record_btrace_cpu.model = model;
3040 record_btrace_cpu.stepping = stepping;
3041
3042 record_btrace_cpu_state = CS_CPU;
3043}
3044
/* The "show record btrace cpu" command.

   Prints the current cpu state: auto, none, or the configured Intel
   family/model[/stepping] triple.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* A zero stepping was not configured and is omitted.  */
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Falling out of either switch means an unknown state or vendor.  */
  error (_("Internal error: bad cpu state."));
}
3082
b20a6524
MM
3083/* The "record bts buffer-size" show value function. */
3084
3085static void
3086show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3087 struct cmd_list_element *c,
3088 const char *value)
3089{
3090 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3091 value);
3092}
3093
3094/* The "record pt buffer-size" show value function. */
3095
3096static void
3097show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3098 struct cmd_list_element *c,
3099 const char *value)
3100{
3101 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3102 value);
3103}
3104
afedecd3
MM
/* Initialize btrace commands.

   Registers the "record btrace" start commands (plus the "bts" and "pt"
   format variants), the "set/show record btrace ..." option hierarchy,
   the record-btrace target itself, and the branch-frame cache.  Also
   establishes the default trace buffer sizes.  */

void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* "record btrace" (alias "record b") starts recording in the default
     format.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" (alias "record bts") selects the BTS format.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" (alias "record pt") selects the Intel PT format.  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_basic_prefix_cmd ("btrace", class_support,
			_("Set record options."), &set_record_btrace_cmdlist,
			"set record btrace ", 0, &set_record_cmdlist);

  add_show_prefix_cmd ("btrace", class_support,
		       _("Show record options."), &show_record_btrace_cmdlist,
		       "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set record btrace cpu" prefix and its "auto"/"none" sub-commands.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  "set record btrace cpu ", 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" option prefixes and buffer size.  */
  add_basic_prefix_cmd ("bts", class_support,
			_("Set record btrace bts options."),
			&set_record_btrace_bts_cmdlist,
			"set record btrace bts ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("bts", class_support,
		       _("Show record btrace bts options."),
		       &show_record_btrace_bts_cmdlist,
		       "show record btrace bts ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" option prefixes and buffer size.  */
  add_basic_prefix_cmd ("pt", class_support,
			_("Set record btrace pt options."),
			&set_record_btrace_pt_cmdlist,
			"set record btrace pt ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("pt", class_support,
		       _("Show record btrace pt options."),
		       &show_record_btrace_pt_cmdlist,
		       "show record btrace pt ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the record-btrace target with GDB.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache for branch frame info; keys/values are managed by the
     bfcache_* callbacks defined elsewhere in this file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes: 64 KB for BTS, 16 KB for Intel PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 1.242942 seconds and 4 git commands to generate.