btrace: temporarily set inferior_ptid in record_btrace_start_replaying
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   NOTE: uses the GCC named-variadic-macro extension (args...) and ##args
   to swallow the trailing comma when no arguments are given.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
99
100
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  /* Branch tracing is recorded per thread; look up the current one.  */
  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Bring the thread's branch trace up to date before checking it.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* This runs as a new-thread observer; a failure to enable tracing
	 for one thread must not abort the caller, so only warn.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
157
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  /* The cleanup machinery hands the thread back as a void pointer.  */
  struct thread_info *tp = arg;

  btrace_disable (tp);
}
169
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Keep the observer handle so record_btrace_auto_disable can detach
     it again later.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
180
181/* Disable automatic tracing of new threads. */
182
183static void
184record_btrace_auto_disable (void)
185{
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194}
195
70ad5bff
MM
/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* Dispatch a regular inferior event; DATA is unused.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
203
afedecd3
MM
/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Register a cleanup per enabled thread so tracing is torn down again
     if anything below throws; the chain is discarded on success.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Success: keep tracing enabled for all selected threads.  */
  discard_cleanups (disable_chain);
}
246
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Stop enabling tracing for newly created threads first.  */
  record_btrace_auto_disable ();

  /* Disable tracing for every thread that is currently being traced.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
262
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
282
b7d2e916
PA
283/* The to_async method of target record-btrace. */
284
285static void
6a3753b3 286record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 287{
6a3753b3 288 if (enable)
b7d2e916
PA
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 else
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292
6a3753b3 293 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
294}
295
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit, *SIZE is scaled down to
   that unit and the unit's suffix is returned; otherwise *SIZE is left
   untouched and "" is returned.  Largest unit wins.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int sz, i;

  sz = *size;

  for (i = 0; i < 3; ++i)
    if ((sz & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = sz >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
323
324/* Print a BTS configuration. */
325
326static void
327record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
328{
329 const char *suffix;
330 unsigned int size;
331
332 size = conf->size;
333 if (size > 0)
334 {
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
337 }
338}
339
b20a6524
MM
340/* Print an Intel(R) Processor Trace configuration. */
341
342static void
343record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
344{
345 const char *suffix;
346 unsigned int size;
347
348 size = conf->size;
349 if (size > 0)
350 {
351 suffix = record_btrace_adjust_size (&size);
352 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
353 }
354}
355
d33501a5
MM
356/* Print a branch tracing configuration. */
357
358static void
359record_btrace_print_conf (const struct btrace_config *conf)
360{
361 printf_unfiltered (_("Recording format: %s.\n"),
362 btrace_format_string (conf->format));
363
364 switch (conf->format)
365 {
366 case BTRACE_FORMAT_NONE:
367 return;
368
369 case BTRACE_FORMAT_BTS:
370 record_btrace_print_bts_conf (&conf->bts);
371 return;
b20a6524
MM
372
373 case BTRACE_FORMAT_PT:
374 record_btrace_print_pt_conf (&conf->pt);
375 return;
d33501a5
MM
376 }
377
378 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
379}
380
afedecd3
MM
/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The last call in the trace carries the highest call number.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace. */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  A gap iterator reports instruction
	     number zero, so walk backwards until we find a real one.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
453
31fd9caa
MM
/* Print a decode error.

   ERRCODE is interpreted according to FORMAT; codes that merely mark a
   non-contiguous trace (cancel/disabled/overflow for PT) are printed
   without the "decode error" wording.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes are libipt errors; translate them.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
525
afedecd3
MM
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  /* ui-out has no unsigned field primitive; go through the fmt variant.  */
  ui_out_field_fmt (uiout, fld, "%u", val);
}
533
/* Disassemble a section of the recorded instruction trace.

   Prints instructions in [BEGIN; END), one per line: index, prefix
   (pc marker or speculation '?'), then the disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace. */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration. */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  char prefix[4];

	  /* We may add a speculation prefix later. We use the same space
	     that is used for the pc prefix. */
	  if ((flags & DISASSEMBLY_OMIT_PC) == 0)
	    strncpy (prefix, pc_prefix (insn->pc), 3);
	  else
	    {
	      prefix[0] = ' ';
	      prefix[1] = ' ';
	      prefix[2] = ' ';
	    }
	  prefix[3] = 0;

	  /* Print the instruction index. */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Indicate speculative execution by a leading '?'. */
	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    prefix[0] = '?';

	  /* Print the prefix; we tell gdb_disassembly below to omit it. */
	  ui_out_field_fmt (uiout, "prefix", "%s", prefix);

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833. */
	  gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
			   1, insn->pc, insn->pc + 1);
	}
    }
}
603
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction (negative = backwards).  The printed window is remembered in
   BTINFO so a repeated command continues where the last one stopped.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining
	 context. */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well. */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
689
/* The to_insn_history_range method of target record-btrace.

   Prints instructions [FROM; TO], both inclusive.  An out-of-range TO is
   silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; if the
     ULONGEST arguments do not round-trip, they were out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range. */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive. */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
742
743/* The to_insn_history_from method of target record-btrace. */
744
745static void
9abc3ff3
TT
746record_btrace_insn_history_from (struct target_ops *self,
747 ULONGEST from, int size, int flags)
afedecd3
MM
748{
749 ULONGEST begin, end, context;
750
751 context = abs (size);
0688d04e
MM
752 if (context == 0)
753 error (_("Bad record instruction-history-size."));
afedecd3
MM
754
755 if (size < 0)
756 {
757 end = from;
758
759 if (from < context)
760 begin = 0;
761 else
0688d04e 762 begin = from - context + 1;
afedecd3
MM
763 }
764 else
765 {
766 begin = from;
0688d04e 767 end = from + context - 1;
afedecd3
MM
768
769 /* Check for wrap-around. */
770 if (end < begin)
771 end = ULONGEST_MAX;
772 }
773
4e99c6b7 774 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
775}
776
777/* Print the instruction number range for a function call history line. */
778
779static void
23a7fe75
MM
780btrace_call_history_insn_range (struct ui_out *uiout,
781 const struct btrace_function *bfun)
afedecd3 782{
7acbe133
MM
783 unsigned int begin, end, size;
784
785 size = VEC_length (btrace_insn_s, bfun->insn);
786 gdb_assert (size > 0);
afedecd3 787
23a7fe75 788 begin = bfun->insn_offset;
7acbe133 789 end = begin + size - 1;
afedecd3 790
23a7fe75 791 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 792 ui_out_text (uiout, ",");
23a7fe75 793 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
794}
795
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Start with an inverted (empty) range; callers detect "no line info"
     by *pend < *pbegin.  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macros) or without line
	 information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
836
afedecd3
MM
/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* An inverted range means no instruction mapped to a source line.  */
  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Print the range only if it spans more than one line.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
866
0b722aec
MM
867/* Get the name of a branch trace function. */
868
869static const char *
870btrace_get_bfun_name (const struct btrace_function *bfun)
871{
872 struct minimal_symbol *msym;
873 struct symbol *sym;
874
875 if (bfun == NULL)
876 return "??";
877
878 msym = bfun->msym;
879 sym = bfun->sym;
880
881 if (sym != NULL)
882 return SYMBOL_PRINT_NAME (sym);
883 else if (msym != NULL)
efd66ac6 884 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
885 else
886 return "??";
887}
888
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints calls in [BEGIN; END): index, optional call-depth indentation,
   function name, and optional instruction-range / source-line columns
   selected by FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index. */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace. */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration. */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by call depth; btinfo->level normalizes the minimum
	     depth to zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
962
/* The to_call_history method of target record-btrace.

   SIZE is the number of calls to print; its sign selects the direction
   (negative = backwards).  The printed window is remembered in BTINFO so
   a repeated command continues where the last one stopped.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining
	 context. */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well. */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1051
/* The to_call_history_range method of target record-btrace.

   Prints calls [FROM; TO], both inclusive.  An out-of-range TO is
   silently truncated to the end of the trace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; if the
     ULONGEST arguments do not round-trip, they were out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range. */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive. */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1104
1105/* The to_call_history_from method of target record-btrace. */
1106
1107static void
ec0aea04
TT
1108record_btrace_call_history_from (struct target_ops *self,
1109 ULONGEST from, int size, int flags)
afedecd3
MM
1110{
1111 ULONGEST begin, end, context;
1112
1113 context = abs (size);
0688d04e
MM
1114 if (context == 0)
1115 error (_("Bad record function-call-history-size."));
afedecd3
MM
1116
1117 if (size < 0)
1118 {
1119 end = from;
1120
1121 if (from < context)
1122 begin = 0;
1123 else
0688d04e 1124 begin = from - context + 1;
afedecd3
MM
1125 }
1126 else
1127 {
1128 begin = from;
0688d04e 1129 end = from + context - 1;
afedecd3
MM
1130
1131 /* Check for wrap-around. */
1132 if (end < begin)
1133 end = ULONGEST_MAX;
1134 }
1135
f0d960ea 1136 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1137}
1138
07bbe694
MM
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero iff any live (non-exited) thread is currently
   replaying its execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
1152
633785ff
MM
1153/* The to_xfer_partial method of target record-btrace. */
1154
9b409511 1155static enum target_xfer_status
633785ff
MM
1156record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1157 const char *annex, gdb_byte *readbuf,
1158 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1159 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1160{
1161 struct target_ops *t;
1162
1163 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1164 if (replay_memory_access == replay_memory_access_read_only
aef92902 1165 && !record_btrace_generating_corefile
67b5c0c1 1166 && record_btrace_is_replaying (ops))
633785ff
MM
1167 {
1168 switch (object)
1169 {
1170 case TARGET_OBJECT_MEMORY:
1171 {
1172 struct target_section *section;
1173
1174 /* We do not allow writing memory in general. */
1175 if (writebuf != NULL)
9b409511
YQ
1176 {
1177 *xfered_len = len;
bc113b4e 1178 return TARGET_XFER_UNAVAILABLE;
9b409511 1179 }
633785ff
MM
1180
1181 /* We allow reading readonly memory. */
1182 section = target_section_by_addr (ops, offset);
1183 if (section != NULL)
1184 {
1185 /* Check if the section we found is readonly. */
1186 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1187 section->the_bfd_section)
1188 & SEC_READONLY) != 0)
1189 {
1190 /* Truncate the request to fit into this section. */
1191 len = min (len, section->endaddr - offset);
1192 break;
1193 }
1194 }
1195
9b409511 1196 *xfered_len = len;
bc113b4e 1197 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1198 }
1199 }
1200 }
1201
1202 /* Forward the request. */
e75fdfca
TT
1203 ops = ops->beneath;
1204 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1205 offset, len, xfered_len);
633785ff
MM
1206}
1207
/* The to_insert_breakpoint method of target record-btrace.

   Inserting a breakpoint writes to target memory, which is normally
   forbidden while replaying.  Temporarily switch replay memory access
   to read-write around the delegated insertion, restoring the previous
   setting on both the normal and the exception path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1238
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: removing a
   breakpoint also writes target memory, so replay memory access is
   temporarily widened to read-write and restored on all paths.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1269
1f3ef581
MM
/* The to_fetch_registers method of target record-btrace.

   While replaying (and not writing a core file), only the PC register
   is available; it is supplied from the current replay instruction.
   Otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      /* Supply the PC of the instruction at the replay position.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1310
1311/* The to_store_registers method of target record-btrace. */
1312
1313static void
1314record_btrace_store_registers (struct target_ops *ops,
1315 struct regcache *regcache, int regno)
1316{
1317 struct target_ops *t;
1318
aef92902 1319 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1320 error (_("This record target does not allow writing registers."));
1321
1322 gdb_assert (may_write_registers != 0);
1323
e75fdfca
TT
1324 t = ops->beneath;
1325 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1326}
1327
1328/* The to_prepare_to_store method of target record-btrace. */
1329
1330static void
1331record_btrace_prepare_to_store (struct target_ops *ops,
1332 struct regcache *regcache)
1333{
1334 struct target_ops *t;
1335
aef92902 1336 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1337 return;
1338
e75fdfca
TT
1339 t = ops->beneath;
1340 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1341}
1342
0b722aec
MM
/* The branch trace frame cache.

   Associates a frame with the thread and branch trace function segment
   it was built from; used by the btrace frame unwinders below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1356
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Maps frame_info pointers to their btrace_frame_cache entries; entries
   are added by bfcache_new and removed on frame-cache deallocation.  */

static htab_t bfcache;
1360
1361/* hash_f for htab_create_alloc of bfcache. */
1362
1363static hashval_t
1364bfcache_hash (const void *arg)
1365{
1366 const struct btrace_frame_cache *cache = arg;
1367
1368 return htab_hash_pointer (cache->frame);
1369}
1370
1371/* eq_f for htab_create_alloc of bfcache. */
1372
1373static int
1374bfcache_eq (const void *arg1, const void *arg2)
1375{
1376 const struct btrace_frame_cache *cache1 = arg1;
1377 const struct btrace_frame_cache *cache2 = arg2;
1378
1379 return cache1->frame == cache2->frame;
1380}
1381
1382/* Create a new btrace frame cache. */
1383
1384static struct btrace_frame_cache *
1385bfcache_new (struct frame_info *frame)
1386{
1387 struct btrace_frame_cache *cache;
1388 void **slot;
1389
1390 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1391 cache->frame = frame;
1392
1393 slot = htab_find_slot (bfcache, cache, INSERT);
1394 gdb_assert (*slot == NULL);
1395 *slot = cache;
1396
1397 return cache;
1398}
1399
1400/* Extract the branch trace function from a branch trace frame. */
1401
1402static const struct btrace_function *
1403btrace_get_frame_function (struct frame_info *frame)
1404{
1405 const struct btrace_frame_cache *cache;
1406 const struct btrace_function *bfun;
1407 struct btrace_frame_cache pattern;
1408 void **slot;
1409
1410 pattern.frame = frame;
1411
1412 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1413 if (slot == NULL)
1414 return NULL;
1415
1416 cache = *slot;
1417 return cache->bfun;
1418}
1419
cecac1ab
MM
1420/* Implement stop_reason method for record_btrace_frame_unwind. */
1421
1422static enum unwind_stop_reason
1423record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1424 void **this_cache)
1425{
0b722aec
MM
1426 const struct btrace_frame_cache *cache;
1427 const struct btrace_function *bfun;
1428
1429 cache = *this_cache;
1430 bfun = cache->bfun;
1431 gdb_assert (bfun != NULL);
1432
1433 if (bfun->up == NULL)
1434 return UNWIND_UNAVAILABLE;
1435
1436 return UNWIND_NO_REASON;
cecac1ab
MM
1437}
1438
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of the function.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function instance share one frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1467
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from branch trace.  For a caller reached
   via a return, the PC is the first instruction of the caller segment;
   otherwise it is the address after the caller's last instruction.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller segment starts at the return address.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Resume after the caller's last executed instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1516
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame if a branch trace function segment can be found for
   it: the current replay position for the innermost frame, or the
   caller of the (non-tailcall) callee frame otherwise.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: unwind through the callee's caller link, unless the
	 callee was reached by a tail call (handled by the tailcall
	 sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1566
1567/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1568
1569static int
1570record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1571 struct frame_info *this_frame,
1572 void **this_cache)
1573{
1574 const struct btrace_function *bfun, *callee;
1575 struct btrace_frame_cache *cache;
1576 struct frame_info *next;
1577
1578 next = get_next_frame (this_frame);
1579 if (next == NULL)
1580 return 0;
1581
1582 callee = btrace_get_frame_function (next);
1583 if (callee == NULL)
1584 return 0;
1585
1586 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1587 return 0;
1588
1589 bfun = callee->up;
1590 if (bfun == NULL)
1591 return 0;
1592
1593 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1594 btrace_get_bfun_name (bfun), bfun->level);
1595
1596 /* This is our frame. Initialize the frame cache. */
1597 cache = bfcache_new (this_frame);
1598 cache->tp = find_thread_ptid (inferior_ptid);
1599 cache->bfun = bfun;
1600
1601 *this_cache = cache;
1602 return 1;
1603}
1604
1605static void
1606record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1607{
1608 struct btrace_frame_cache *cache;
1609 void **slot;
1610
1611 cache = this_cache;
1612
1613 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1614 gdb_assert (slot != NULL);
1615
1616 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1617}
1618
/* btrace recording does not store previous memory content, nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1635
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call; only the sniffer differs.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1646
ac01945b
TT
/* Implement the to_get_unwinder method.
   Returns the btrace frame unwinder defined above.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1654
/* Implement the to_get_tailcall_unwinder method.
   Returns the btrace tail-call frame unwinder defined above.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1662
987e68b1
MM
1663/* Return a human-readable string for FLAG. */
1664
1665static const char *
1666btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1667{
1668 switch (flag)
1669 {
1670 case BTHR_STEP:
1671 return "step";
1672
1673 case BTHR_RSTEP:
1674 return "reverse-step";
1675
1676 case BTHR_CONT:
1677 return "cont";
1678
1679 case BTHR_RCONT:
1680 return "reverse-cont";
1681
1682 case BTHR_STOP:
1683 return "stop";
1684 }
1685
1686 return "<invalid>";
1687}
1688
52834460
MM
/* Indicate that TP should be resumed according to FLAG.

   Only records the intent in TP's btrace flags; the actual stepping
   happens in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1709
ec71cc2f
MM
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears TP's executing
   flag so the frame machinery will compute a frame; both are restored
   on the normal and the exception path.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1758
52834460
MM
/* Start replaying a thread.

   Positions TP's replay iterator at the end of its branch trace
   (skipping trailing gaps) and fixes up the stepping-related frame ids
   stored in TP, which change once the btrace unwinder takes over.
   Returns the new replay iterator, or NULL if TP has no trace.  On
   error, all replay state is rolled back before rethrowing.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Roll back: free the iterator and invalidate any registers that
	 may have been supplied from the replay position.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
1840
1841/* Stop replaying a thread. */
1842
1843static void
1844record_btrace_stop_replaying (struct thread_info *tp)
1845{
1846 struct btrace_thread_info *btinfo;
1847
1848 btinfo = &tp->btrace;
1849
1850 xfree (btinfo->replay);
1851 btinfo->replay = NULL;
1852
1853 /* Make sure we're not leaving any stale registers. */
1854 registers_changed_ptid (tp->ptid);
1855}
1856
e3cfc1c7
MM
1857/* Stop replaying TP if it is at the end of its execution history. */
1858
1859static void
1860record_btrace_stop_replaying_at_end (struct thread_info *tp)
1861{
1862 struct btrace_insn_iterator *replay, end;
1863 struct btrace_thread_info *btinfo;
1864
1865 btinfo = &tp->btrace;
1866 replay = btinfo->replay;
1867
1868 if (replay == NULL)
1869 return;
1870
1871 btrace_insn_end (&end, btinfo);
1872
1873 if (btrace_insn_cmp (replay, &end) == 0)
1874 record_btrace_stop_replaying (tp);
1875}
1876
b2f4cfde
MM
/* The to_resume method of target record-btrace.

   Records the resume intent (direction and step/continue) in the btrace
   flags of the matching threads; the actual stepping happens in
   record_btrace_wait.  When no thread is replaying and we move forward,
   the request is forwarded to the target beneath instead.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag;
  ptid_t orig_ptid;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Remember the original request for the forwarding case below; PTID
     itself may be rewritten for all-stop targets.  */
  orig_ptid = ptid;

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* For all-stop targets...  */
  if (!target_is_non_stop_p ())
    {
      /* ...we pick the current thread when asked to resume an entire process
	 or everything.  */
      if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
	ptid = inferior_ptid;

      tp = find_thread_ptid (ptid);
      if (tp == NULL)
	error (_("Cannot find thread to resume."));

      /* ...and we stop replaying other threads if the thread to resume is not
	 replaying.  */
      if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
	ALL_NON_EXITED_THREADS (tp)
	  record_btrace_stop_replaying (tp);
    }

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, orig_ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid))
      record_btrace_resume_thread (tp, flag);

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1948
987e68b1
MM
/* Cancel resuming TP.

   Clears any pending move/stop request and, if TP has reached the end
   of its execution history, stops replaying it.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
1967
1968/* Return a target_waitstatus indicating that we ran out of history. */
1969
1970static struct target_waitstatus
1971btrace_step_no_history (void)
1972{
1973 struct target_waitstatus status;
1974
1975 status.kind = TARGET_WAITKIND_NO_HISTORY;
1976
1977 return status;
1978}
1979
1980/* Return a target_waitstatus indicating that a step finished. */
1981
1982static struct target_waitstatus
1983btrace_step_stopped (void)
1984{
1985 struct target_waitstatus status;
1986
1987 status.kind = TARGET_WAITKIND_STOPPED;
1988 status.value.sig = GDB_SIGNAL_TRAP;
1989
1990 return status;
1991}
1992
6e4879f0
MM
1993/* Return a target_waitstatus indicating that a thread was stopped as
1994 requested. */
1995
1996static struct target_waitstatus
1997btrace_step_stopped_on_request (void)
1998{
1999 struct target_waitstatus status;
2000
2001 status.kind = TARGET_WAITKIND_STOPPED;
2002 status.value.sig = GDB_SIGNAL_0;
2003
2004 return status;
2005}
2006
d825d248
MM
2007/* Return a target_waitstatus indicating a spurious stop. */
2008
2009static struct target_waitstatus
2010btrace_step_spurious (void)
2011{
2012 struct target_waitstatus status;
2013
2014 status.kind = TARGET_WAITKIND_SPURIOUS;
2015
2016 return status;
2017}
2018
e3cfc1c7
MM
2019/* Return a target_waitstatus indicating that the thread was not resumed. */
2020
2021static struct target_waitstatus
2022btrace_step_no_resumed (void)
2023{
2024 struct target_waitstatus status;
2025
2026 status.kind = TARGET_WAITKIND_NO_RESUMED;
2027
2028 return status;
2029}
2030
2031/* Return a target_waitstatus indicating that we should wait again. */
2032
2033static struct target_waitstatus
2034btrace_step_again (void)
2035{
2036 struct target_waitstatus status;
2037
2038 status.kind = TARGET_WAITKIND_IGNORE;
2039
2040 return status;
2041}
2042
52834460
MM
2043/* Clear the record histories. */
2044
2045static void
2046record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2047{
2048 xfree (btinfo->insn_history);
2049 xfree (btinfo->call_history);
2050
2051 btinfo->insn_history = NULL;
2052 btinfo->call_history = NULL;
2053}
2054
3c615f99
MM
/* Check whether TP's current replay position is at a breakpoint.

   Returns zero if TP is not replaying, sits on a trace gap, or its
   inferior cannot be found.  Records the stop reason in TP's btrace
   info as a side effect of the breakpoint check.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
2082
/* Step one instruction in forward direction.

   Returns NO_HISTORY when TP is not replaying or reaches the end of
   its trace, STOPPED when stepping onto a breakpoint, and SPURIOUS
   after a successful single step.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2126
/* Step one instruction in backward direction.

   Starts replaying TP if necessary.  Returns NO_HISTORY at the
   beginning of the trace, STOPPED when the new position is at a
   breakpoint, and SPURIOUS after a successful reverse step.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2168
/* Step a single thread.

   Consumes TP's pending move/stop request and performs one step (or
   one continue iteration) in the requested direction.  Returns the
   resulting waitstatus; IGNORE means the caller should step again.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request so it is not handled twice.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the continue request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the continue request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2237
e3cfc1c7
MM
/* A vector of threads.  Used by record_btrace_wait to keep work lists of
   moving threads and of threads that ran out of execution history.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2242
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   While replaying (or executing in reverse), step all resumed threads
   one instruction at a time until one of them reports an event, and
   return that thread's ptid with *STATUS filled in.  Otherwise forward
   the request to the target beneath.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* The thread moved without incident; keep it on the list.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Defer reporting; see the comment above.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2371
6e4879f0
MM
2372/* The to_stop method of target record-btrace. */
2373
2374static void
2375record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2376{
2377 DEBUG ("stop %s", target_pid_to_str (ptid));
2378
2379 /* As long as we're not replaying, just forward the request. */
2380 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2381 {
2382 ops = ops->beneath;
2383 ops->to_stop (ops, ptid);
2384 }
2385 else
2386 {
2387 struct thread_info *tp;
2388
2389 ALL_NON_EXITED_THREADS (tp)
2390 if (ptid_match (tp->ptid, ptid))
2391 {
2392 tp->btrace.flags &= ~BTHR_MOVE;
2393 tp->btrace.flags |= BTHR_STOP;
2394 }
2395 }
2396 }
2397
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.

   Reverse execution is always possible: we replay the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2405
9e8915c6 2406/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2407
9e8915c6
PA
2408static int
2409record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2410{
1c63c994 2411 if (record_btrace_is_replaying (ops))
9e8915c6
PA
2412 {
2413 struct thread_info *tp = inferior_thread ();
2414
2415 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2416 }
2417
2418 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2419}
2420
2421/* The to_supports_stopped_by_sw_breakpoint method of target
2422 record-btrace. */
2423
2424static int
2425record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2426{
2427 if (record_btrace_is_replaying (ops))
2428 return 1;
2429
2430 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2431}
2432
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw"; this is the hardware-breakpoint
   variant.)  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, the stop reason was recorded when the thread
	 stopped.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2447
2448/* The to_supports_stopped_by_hw_breakpoint method of target
2449 record-btrace. */
2450
2451static int
2452record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2453{
2454 if (record_btrace_is_replaying (ops))
2455 return 1;
52834460 2456
9e8915c6 2457 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2458}
2459
e8032dde 2460/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2461
2462static void
e8032dde 2463record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2464{
e8032dde 2465 /* We don't add or remove threads during replay. */
1c63c994 2466 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2467 return;
2468
2469 /* Forward the request. */
e75fdfca 2470 ops = ops->beneath;
e8032dde 2471 ops->to_update_thread_list (ops);
e2887aa3
MM
2472}
2473
2474/* The to_thread_alive method of target record-btrace. */
2475
2476static int
2477record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2478{
2479 /* We don't add or remove threads during replay. */
1c63c994 2480 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2481 return find_thread_ptid (ptid) != NULL;
2482
2483 /* Forward the request. */
e75fdfca
TT
2484 ops = ops->beneath;
2485 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2486}
2487
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested replay position; nothing to do.  */
	return;

      /* Move the replay position; registers are refetched lazily after
	 the registers_changed notification below.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2518
2519/* The to_goto_record_begin method of target record-btrace. */
2520
2521static void
08475817 2522record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2523{
2524 struct thread_info *tp;
2525 struct btrace_insn_iterator begin;
2526
2527 tp = require_btrace_thread ();
2528
2529 btrace_insn_begin (&begin, &tp->btrace);
2530 record_btrace_set_replay (tp, &begin);
066ce621
MM
2531}
2532
2533/* The to_goto_record_end method of target record-btrace. */
2534
2535static void
307a1b91 2536record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2537{
2538 struct thread_info *tp;
2539
2540 tp = require_btrace_thread ();
2541
2542 record_btrace_set_replay (tp, NULL);
066ce621
MM
2543}
2544
/* The to_goto_record method of target record-btrace.

   Move the replay position to recorded instruction INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: NUMBER is narrower than INSN, so a value
     that does not round-trip was out of range.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2569
70ad5bff
MM
/* The to_execution_direction target method.

   Reports the direction of the last resume request so that infrun can
   adjust its behavior accordingly.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2577
aef92902
MM
/* The to_prepare_to_generate_core target method.

   While the flag is set, memory accesses go to the live target even
   during replay, so the core file reflects live memory.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2585
/* The to_done_generating_core target method.

   Clears the flag set by record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2593
afedecd3
MM
/* Initialize the record-btrace target ops.

   Fills in the static record_btrace_ops structure; called once from
   _initialize_record_btrace before add_target.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2650
f4abbc16
MM
/* Start recording in BTS format.

   Implements "record btrace bts".  On failure, the configured format is
   reset to BTRACE_FORMAT_NONE before the error is re-thrown.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2672
/* Start recording Intel(R) Processor Trace.

   Implements "record btrace pt".  On failure, the configured format is
   reset to BTRACE_FORMAT_NONE before the error is re-thrown.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2694
b20a6524
MM
/* Alias for "target record".

   Implements "record btrace": try Intel Processor Trace first, fall back
   to BTS if that fails, and reset the format if both fail.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer the PT format...  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* ...but fall back to BTS if PT is not available.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2726
67b5c0c1
MM
/* The "set record btrace" command.

   A bare "set record btrace" simply lists the available suboptions.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2734
/* The "show record btrace" command.

   Lists the values of all "record btrace" suboptions.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2742
2743/* The "show record btrace replay-memory-access" command. */
2744
2745static void
2746cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2747 struct cmd_list_element *c, const char *value)
2748{
2749 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2750 replay_memory_access);
2751}
2752
d33501a5
MM
/* The "set record btrace bts" command.

   A bare "set record btrace bts" prints the available suboptions.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2763
/* The "show record btrace bts" command.

   Lists the values of all "record btrace bts" suboptions.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2771
b20a6524
MM
/* The "set record btrace pt" command.

   A bare "set record btrace pt" prints the available suboptions.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2782
/* The "show record btrace pt" command.

   Lists the values of all "record btrace pt" suboptions.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2790
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2801
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2812
afedecd3
MM
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family, the "set/show record
   btrace" option tree, the target itself, and default configuration
   values.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for bfun/frame lookups used by the btrace unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.6713 seconds and 4 git commands to generate.