target, record: add PTID argument to to_record_is_replaying
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
f4abbc16
MM
74/* The current branch trace configuration. */
75static struct btrace_config record_btrace_conf;
76
77/* Command list for "record btrace". */
78static struct cmd_list_element *record_btrace_cmdlist;
79
d33501a5
MM
80/* Command lists for "set/show record btrace bts". */
81static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
b20a6524
MM
84/* Command lists for "set/show record btrace pt". */
85static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
88/* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91#define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
100
101/* Update the branch trace for the current thread and return a pointer to its
066ce621 102 thread_info.
afedecd3
MM
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
066ce621
MM
107static struct thread_info *
108require_btrace_thread (void)
afedecd3
MM
109{
110 struct thread_info *tp;
afedecd3
MM
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
6e07b1d2 120 if (btrace_is_empty (tp))
afedecd3
MM
121 error (_("No trace."));
122
066ce621
MM
123 return tp;
124}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
142/* Enable branch tracing for one thread. Warn on errors. */
143
144static void
145record_btrace_enable_warn (struct thread_info *tp)
146{
492d29ea
PA
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
afedecd3
MM
156}
157
/* Cleanup callback disabling branch tracing for one thread.  ARG is the
   thread_info pointer registered with make_cleanup.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
169
170/* Enable automatic tracing of new threads. */
171
172static void
173record_btrace_auto_enable (void)
174{
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179}
180
181/* Disable automatic tracing of new threads. */
182
183static void
184record_btrace_auto_disable (void)
185{
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194}
195
70ad5bff
MM
196/* The record-btrace async event handler function. */
197
198static void
199record_btrace_handle_async_inferior_event (gdb_client_data data)
200{
201 inferior_event_handler (INF_REG_EVENT, NULL);
202}
203
afedecd3
MM
204/* The to_open method of target record-btrace. */
205
206static void
014f9477 207record_btrace_open (const char *args, int from_tty)
afedecd3
MM
208{
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
211
212 DEBUG ("open");
213
8213266a 214 record_preopen ();
afedecd3
MM
215
216 if (!target_has_execution)
217 error (_("The program is not being run."));
218
afedecd3
MM
219 gdb_assert (record_btrace_thread_observer == NULL);
220
221 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 222 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
223 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
224 {
f4abbc16 225 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
226
227 make_cleanup (record_btrace_disable_callback, tp);
228 }
229
230 record_btrace_auto_enable ();
231
232 push_target (&record_btrace_ops);
233
70ad5bff
MM
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event,
236 NULL);
aef92902 237 record_btrace_generating_corefile = 0;
70ad5bff 238
afedecd3
MM
239 observer_notify_record_changed (current_inferior (), 1);
240
241 discard_cleanups (disable_chain);
242}
243
244/* The to_stop_recording method of target record-btrace. */
245
246static void
c6cd7c02 247record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
248{
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
034f788c 255 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258}
259
260/* The to_close method of target record-btrace. */
261
262static void
de90e03d 263record_btrace_close (struct target_ops *self)
afedecd3 264{
568e808b
MM
265 struct thread_info *tp;
266
70ad5bff
MM
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
269
99c819ee
MM
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
273
568e808b
MM
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
034f788c 276 ALL_NON_EXITED_THREADS (tp)
568e808b 277 btrace_teardown (tp);
afedecd3
MM
278}
279
b7d2e916
PA
280/* The to_async method of target record-btrace. */
281
282static void
6a3753b3 283record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 284{
6a3753b3 285 if (enable)
b7d2e916
PA
286 mark_async_event_handler (record_btrace_async_inferior_event_handler);
287 else
288 clear_async_event_handler (record_btrace_async_inferior_event_handler);
289
6a3753b3 290 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
291}
292
/* Scale *SIZE down to the largest unit that divides it evenly and return
   the matching human-readable suffix ("GB", "MB", "kB", or "" if no unit
   divides it).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  const unsigned int value = *size;

  if ((value & ((1u << 30) - 1)) == 0)
    {
      *size = value >> 30;
      return "GB";
    }

  if ((value & ((1u << 20) - 1)) == 0)
    {
      *size = value >> 20;
      return "MB";
    }

  if ((value & ((1u << 10) - 1)) == 0)
    {
      *size = value >> 10;
      return "kB";
    }

  return "";
}
320
321/* Print a BTS configuration. */
322
323static void
324record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325{
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335}
336
b20a6524
MM
337/* Print an Intel(R) Processor Trace configuration. */
338
339static void
340record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
341{
342 const char *suffix;
343 unsigned int size;
344
345 size = conf->size;
346 if (size > 0)
347 {
348 suffix = record_btrace_adjust_size (&size);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
350 }
351}
352
d33501a5
MM
353/* Print a branch tracing configuration. */
354
355static void
356record_btrace_print_conf (const struct btrace_config *conf)
357{
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
b20a6524
MM
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
d33501a5
MM
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
376}
377
afedecd3
MM
378/* The to_info_record method of target record-btrace. */
379
380static void
630d6a4a 381record_btrace_info (struct target_ops *self)
afedecd3
MM
382{
383 struct btrace_thread_info *btinfo;
f4abbc16 384 const struct btrace_config *conf;
afedecd3 385 struct thread_info *tp;
31fd9caa 386 unsigned int insns, calls, gaps;
afedecd3
MM
387
388 DEBUG ("info");
389
390 tp = find_thread_ptid (inferior_ptid);
391 if (tp == NULL)
392 error (_("No thread."));
393
f4abbc16
MM
394 btinfo = &tp->btrace;
395
396 conf = btrace_conf (btinfo);
397 if (conf != NULL)
d33501a5 398 record_btrace_print_conf (conf);
f4abbc16 399
afedecd3
MM
400 btrace_fetch (tp);
401
23a7fe75
MM
402 insns = 0;
403 calls = 0;
31fd9caa 404 gaps = 0;
23a7fe75 405
6e07b1d2 406 if (!btrace_is_empty (tp))
23a7fe75
MM
407 {
408 struct btrace_call_iterator call;
409 struct btrace_insn_iterator insn;
410
411 btrace_call_end (&call, btinfo);
412 btrace_call_prev (&call, 1);
5de9129b 413 calls = btrace_call_number (&call);
23a7fe75
MM
414
415 btrace_insn_end (&insn, btinfo);
31fd9caa 416
5de9129b 417 insns = btrace_insn_number (&insn);
31fd9caa
MM
418 if (insns != 0)
419 {
420 /* The last instruction does not really belong to the trace. */
421 insns -= 1;
422 }
423 else
424 {
425 unsigned int steps;
426
427 /* Skip gaps at the end. */
428 do
429 {
430 steps = btrace_insn_prev (&insn, 1);
431 if (steps == 0)
432 break;
433
434 insns = btrace_insn_number (&insn);
435 }
436 while (insns == 0);
437 }
438
439 gaps = btinfo->ngaps;
23a7fe75 440 }
afedecd3 441
31fd9caa
MM
442 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
443 "for thread %d (%s).\n"), insns, calls, gaps,
444 tp->num, target_pid_to_str (tp->ptid));
07bbe694
MM
445
446 if (btrace_is_replaying (tp))
447 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
448 btrace_insn_number (btinfo->replay));
afedecd3
MM
449}
450
31fd9caa
MM
451/* Print a decode error. */
452
453static void
454btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
455 enum btrace_format format)
456{
457 const char *errstr;
458 int is_error;
459
460 errstr = _("unknown");
461 is_error = 1;
462
463 switch (format)
464 {
465 default:
466 break;
467
468 case BTRACE_FORMAT_BTS:
469 switch (errcode)
470 {
471 default:
472 break;
473
474 case BDE_BTS_OVERFLOW:
475 errstr = _("instruction overflow");
476 break;
477
478 case BDE_BTS_INSN_SIZE:
479 errstr = _("unknown instruction");
480 break;
481 }
482 break;
b20a6524
MM
483
484#if defined (HAVE_LIBIPT)
485 case BTRACE_FORMAT_PT:
486 switch (errcode)
487 {
488 case BDE_PT_USER_QUIT:
489 is_error = 0;
490 errstr = _("trace decode cancelled");
491 break;
492
493 case BDE_PT_DISABLED:
494 is_error = 0;
495 errstr = _("disabled");
496 break;
497
498 case BDE_PT_OVERFLOW:
499 is_error = 0;
500 errstr = _("overflow");
501 break;
502
503 default:
504 if (errcode < 0)
505 errstr = pt_errstr (pt_errcode (errcode));
506 break;
507 }
508 break;
509#endif /* defined (HAVE_LIBIPT) */
31fd9caa
MM
510 }
511
512 ui_out_text (uiout, _("["));
513 if (is_error)
514 {
515 ui_out_text (uiout, _("decode error ("));
516 ui_out_field_int (uiout, "errcode", errcode);
517 ui_out_text (uiout, _("): "));
518 }
519 ui_out_text (uiout, errstr);
520 ui_out_text (uiout, _("]\n"));
521}
522
/* Print an unsigned int field named FLD with value VAL to UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
530
531/* Disassemble a section of the recorded instruction trace. */
532
533static void
23a7fe75 534btrace_insn_history (struct ui_out *uiout,
31fd9caa 535 const struct btrace_thread_info *btinfo,
23a7fe75
MM
536 const struct btrace_insn_iterator *begin,
537 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
538{
539 struct gdbarch *gdbarch;
23a7fe75 540 struct btrace_insn_iterator it;
afedecd3 541
23a7fe75
MM
542 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
543 btrace_insn_number (end));
afedecd3
MM
544
545 gdbarch = target_gdbarch ();
546
23a7fe75 547 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 548 {
23a7fe75
MM
549 const struct btrace_insn *insn;
550
551 insn = btrace_insn_get (&it);
552
31fd9caa
MM
553 /* A NULL instruction indicates a gap in the trace. */
554 if (insn == NULL)
555 {
556 const struct btrace_config *conf;
557
558 conf = btrace_conf (btinfo);
afedecd3 559
31fd9caa
MM
560 /* We have trace so we must have a configuration. */
561 gdb_assert (conf != NULL);
562
563 btrace_ui_out_decode_error (uiout, it.function->errcode,
564 conf->format);
565 }
566 else
567 {
da8c46d2
MM
568 char prefix[4];
569
570 /* We may add a speculation prefix later. We use the same space
571 that is used for the pc prefix. */
572 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
573 strncpy (prefix, pc_prefix (insn->pc), 3);
574 else
575 {
576 prefix[0] = ' ';
577 prefix[1] = ' ';
578 prefix[2] = ' ';
579 }
580 prefix[3] = 0;
581
31fd9caa
MM
582 /* Print the instruction index. */
583 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
584 ui_out_text (uiout, "\t");
585
da8c46d2
MM
586 /* Indicate speculative execution by a leading '?'. */
587 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
588 prefix[0] = '?';
589
590 /* Print the prefix; we tell gdb_disassembly below to omit it. */
591 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
592
31fd9caa
MM
593 /* Disassembly with '/m' flag may not produce the expected result.
594 See PR gdb/11833. */
da8c46d2
MM
595 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
596 1, insn->pc, insn->pc + 1);
31fd9caa 597 }
afedecd3
MM
598 }
599}
600
601/* The to_insn_history method of target record-btrace. */
602
603static void
7a6c5609 604record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
605{
606 struct btrace_thread_info *btinfo;
23a7fe75
MM
607 struct btrace_insn_history *history;
608 struct btrace_insn_iterator begin, end;
afedecd3
MM
609 struct cleanup *uiout_cleanup;
610 struct ui_out *uiout;
23a7fe75 611 unsigned int context, covered;
afedecd3
MM
612
613 uiout = current_uiout;
614 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
615 "insn history");
afedecd3 616 context = abs (size);
afedecd3
MM
617 if (context == 0)
618 error (_("Bad record instruction-history-size."));
619
23a7fe75
MM
620 btinfo = require_btrace ();
621 history = btinfo->insn_history;
622 if (history == NULL)
afedecd3 623 {
07bbe694 624 struct btrace_insn_iterator *replay;
afedecd3 625
23a7fe75 626 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 627
07bbe694
MM
628 /* If we're replaying, we start at the replay position. Otherwise, we
629 start at the tail of the trace. */
630 replay = btinfo->replay;
631 if (replay != NULL)
632 begin = *replay;
633 else
634 btrace_insn_end (&begin, btinfo);
635
636 /* We start from here and expand in the requested direction. Then we
637 expand in the other direction, as well, to fill up any remaining
638 context. */
639 end = begin;
640 if (size < 0)
641 {
642 /* We want the current position covered, as well. */
643 covered = btrace_insn_next (&end, 1);
644 covered += btrace_insn_prev (&begin, context - covered);
645 covered += btrace_insn_next (&end, context - covered);
646 }
647 else
648 {
649 covered = btrace_insn_next (&end, context);
650 covered += btrace_insn_prev (&begin, context - covered);
651 }
afedecd3
MM
652 }
653 else
654 {
23a7fe75
MM
655 begin = history->begin;
656 end = history->end;
afedecd3 657
23a7fe75
MM
658 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
659 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 660
23a7fe75
MM
661 if (size < 0)
662 {
663 end = begin;
664 covered = btrace_insn_prev (&begin, context);
665 }
666 else
667 {
668 begin = end;
669 covered = btrace_insn_next (&end, context);
670 }
afedecd3
MM
671 }
672
23a7fe75 673 if (covered > 0)
31fd9caa 674 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
675 else
676 {
677 if (size < 0)
678 printf_unfiltered (_("At the start of the branch trace record.\n"));
679 else
680 printf_unfiltered (_("At the end of the branch trace record.\n"));
681 }
afedecd3 682
23a7fe75 683 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
684 do_cleanups (uiout_cleanup);
685}
686
687/* The to_insn_history_range method of target record-btrace. */
688
689static void
4e99c6b7
TT
690record_btrace_insn_history_range (struct target_ops *self,
691 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
692{
693 struct btrace_thread_info *btinfo;
23a7fe75
MM
694 struct btrace_insn_history *history;
695 struct btrace_insn_iterator begin, end;
afedecd3
MM
696 struct cleanup *uiout_cleanup;
697 struct ui_out *uiout;
23a7fe75
MM
698 unsigned int low, high;
699 int found;
afedecd3
MM
700
701 uiout = current_uiout;
702 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
703 "insn history");
23a7fe75
MM
704 low = from;
705 high = to;
afedecd3 706
23a7fe75 707 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
708
709 /* Check for wrap-arounds. */
23a7fe75 710 if (low != from || high != to)
afedecd3
MM
711 error (_("Bad range."));
712
0688d04e 713 if (high < low)
afedecd3
MM
714 error (_("Bad range."));
715
23a7fe75 716 btinfo = require_btrace ();
afedecd3 717
23a7fe75
MM
718 found = btrace_find_insn_by_number (&begin, btinfo, low);
719 if (found == 0)
720 error (_("Range out of bounds."));
afedecd3 721
23a7fe75
MM
722 found = btrace_find_insn_by_number (&end, btinfo, high);
723 if (found == 0)
0688d04e
MM
724 {
725 /* Silently truncate the range. */
726 btrace_insn_end (&end, btinfo);
727 }
728 else
729 {
730 /* We want both begin and end to be inclusive. */
731 btrace_insn_next (&end, 1);
732 }
afedecd3 733
31fd9caa 734 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 735 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
736
737 do_cleanups (uiout_cleanup);
738}
739
740/* The to_insn_history_from method of target record-btrace. */
741
742static void
9abc3ff3
TT
743record_btrace_insn_history_from (struct target_ops *self,
744 ULONGEST from, int size, int flags)
afedecd3
MM
745{
746 ULONGEST begin, end, context;
747
748 context = abs (size);
0688d04e
MM
749 if (context == 0)
750 error (_("Bad record instruction-history-size."));
afedecd3
MM
751
752 if (size < 0)
753 {
754 end = from;
755
756 if (from < context)
757 begin = 0;
758 else
0688d04e 759 begin = from - context + 1;
afedecd3
MM
760 }
761 else
762 {
763 begin = from;
0688d04e 764 end = from + context - 1;
afedecd3
MM
765
766 /* Check for wrap-around. */
767 if (end < begin)
768 end = ULONGEST_MAX;
769 }
770
4e99c6b7 771 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
772}
773
774/* Print the instruction number range for a function call history line. */
775
776static void
23a7fe75
MM
777btrace_call_history_insn_range (struct ui_out *uiout,
778 const struct btrace_function *bfun)
afedecd3 779{
7acbe133
MM
780 unsigned int begin, end, size;
781
782 size = VEC_length (btrace_insn_s, bfun->insn);
783 gdb_assert (size > 0);
afedecd3 784
23a7fe75 785 begin = bfun->insn_offset;
7acbe133 786 end = begin + size - 1;
afedecd3 787
23a7fe75 788 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 789 ui_out_text (uiout, ",");
23a7fe75 790 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
791}
792
ce0dfbea
MM
793/* Compute the lowest and highest source line for the instructions in BFUN
794 and return them in PBEGIN and PEND.
795 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
796 result from inlining or macro expansion. */
797
798static void
799btrace_compute_src_line_range (const struct btrace_function *bfun,
800 int *pbegin, int *pend)
801{
802 struct btrace_insn *insn;
803 struct symtab *symtab;
804 struct symbol *sym;
805 unsigned int idx;
806 int begin, end;
807
808 begin = INT_MAX;
809 end = INT_MIN;
810
811 sym = bfun->sym;
812 if (sym == NULL)
813 goto out;
814
815 symtab = symbol_symtab (sym);
816
817 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
818 {
819 struct symtab_and_line sal;
820
821 sal = find_pc_line (insn->pc, 0);
822 if (sal.symtab != symtab || sal.line == 0)
823 continue;
824
825 begin = min (begin, sal.line);
826 end = max (end, sal.line);
827 }
828
829 out:
830 *pbegin = begin;
831 *pend = end;
832}
833
afedecd3
MM
834/* Print the source line information for a function call history line. */
835
836static void
23a7fe75
MM
837btrace_call_history_src_line (struct ui_out *uiout,
838 const struct btrace_function *bfun)
afedecd3
MM
839{
840 struct symbol *sym;
23a7fe75 841 int begin, end;
afedecd3
MM
842
843 sym = bfun->sym;
844 if (sym == NULL)
845 return;
846
847 ui_out_field_string (uiout, "file",
08be3fe3 848 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 849
ce0dfbea 850 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 851 if (end < begin)
afedecd3
MM
852 return;
853
854 ui_out_text (uiout, ":");
23a7fe75 855 ui_out_field_int (uiout, "min line", begin);
afedecd3 856
23a7fe75 857 if (end == begin)
afedecd3
MM
858 return;
859
8710b709 860 ui_out_text (uiout, ",");
23a7fe75 861 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
862}
863
0b722aec
MM
864/* Get the name of a branch trace function. */
865
866static const char *
867btrace_get_bfun_name (const struct btrace_function *bfun)
868{
869 struct minimal_symbol *msym;
870 struct symbol *sym;
871
872 if (bfun == NULL)
873 return "??";
874
875 msym = bfun->msym;
876 sym = bfun->sym;
877
878 if (sym != NULL)
879 return SYMBOL_PRINT_NAME (sym);
880 else if (msym != NULL)
efd66ac6 881 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
882 else
883 return "??";
884}
885
afedecd3
MM
886/* Disassemble a section of the recorded function trace. */
887
888static void
23a7fe75 889btrace_call_history (struct ui_out *uiout,
8710b709 890 const struct btrace_thread_info *btinfo,
23a7fe75
MM
891 const struct btrace_call_iterator *begin,
892 const struct btrace_call_iterator *end,
afedecd3
MM
893 enum record_print_flag flags)
894{
23a7fe75 895 struct btrace_call_iterator it;
afedecd3 896
23a7fe75
MM
897 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
898 btrace_call_number (end));
afedecd3 899
23a7fe75 900 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 901 {
23a7fe75
MM
902 const struct btrace_function *bfun;
903 struct minimal_symbol *msym;
904 struct symbol *sym;
905
906 bfun = btrace_call_get (&it);
23a7fe75 907 sym = bfun->sym;
0b722aec 908 msym = bfun->msym;
23a7fe75 909
afedecd3 910 /* Print the function index. */
23a7fe75 911 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
912 ui_out_text (uiout, "\t");
913
31fd9caa
MM
914 /* Indicate gaps in the trace. */
915 if (bfun->errcode != 0)
916 {
917 const struct btrace_config *conf;
918
919 conf = btrace_conf (btinfo);
920
921 /* We have trace so we must have a configuration. */
922 gdb_assert (conf != NULL);
923
924 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
925
926 continue;
927 }
928
8710b709
MM
929 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
930 {
931 int level = bfun->level + btinfo->level, i;
932
933 for (i = 0; i < level; ++i)
934 ui_out_text (uiout, " ");
935 }
936
937 if (sym != NULL)
938 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
939 else if (msym != NULL)
efd66ac6 940 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
941 else if (!ui_out_is_mi_like_p (uiout))
942 ui_out_field_string (uiout, "function", "??");
943
1e038f67 944 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 945 {
8710b709 946 ui_out_text (uiout, _("\tinst "));
23a7fe75 947 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
948 }
949
1e038f67 950 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 951 {
8710b709 952 ui_out_text (uiout, _("\tat "));
23a7fe75 953 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
954 }
955
afedecd3
MM
956 ui_out_text (uiout, "\n");
957 }
958}
959
960/* The to_call_history method of target record-btrace. */
961
962static void
5df2fcba 963record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
964{
965 struct btrace_thread_info *btinfo;
23a7fe75
MM
966 struct btrace_call_history *history;
967 struct btrace_call_iterator begin, end;
afedecd3
MM
968 struct cleanup *uiout_cleanup;
969 struct ui_out *uiout;
23a7fe75 970 unsigned int context, covered;
afedecd3
MM
971
972 uiout = current_uiout;
973 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
974 "insn history");
afedecd3 975 context = abs (size);
afedecd3
MM
976 if (context == 0)
977 error (_("Bad record function-call-history-size."));
978
23a7fe75
MM
979 btinfo = require_btrace ();
980 history = btinfo->call_history;
981 if (history == NULL)
afedecd3 982 {
07bbe694 983 struct btrace_insn_iterator *replay;
afedecd3 984
23a7fe75 985 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 986
07bbe694
MM
987 /* If we're replaying, we start at the replay position. Otherwise, we
988 start at the tail of the trace. */
989 replay = btinfo->replay;
990 if (replay != NULL)
991 {
992 begin.function = replay->function;
993 begin.btinfo = btinfo;
994 }
995 else
996 btrace_call_end (&begin, btinfo);
997
998 /* We start from here and expand in the requested direction. Then we
999 expand in the other direction, as well, to fill up any remaining
1000 context. */
1001 end = begin;
1002 if (size < 0)
1003 {
1004 /* We want the current position covered, as well. */
1005 covered = btrace_call_next (&end, 1);
1006 covered += btrace_call_prev (&begin, context - covered);
1007 covered += btrace_call_next (&end, context - covered);
1008 }
1009 else
1010 {
1011 covered = btrace_call_next (&end, context);
1012 covered += btrace_call_prev (&begin, context- covered);
1013 }
afedecd3
MM
1014 }
1015 else
1016 {
23a7fe75
MM
1017 begin = history->begin;
1018 end = history->end;
afedecd3 1019
23a7fe75
MM
1020 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1021 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1022
23a7fe75
MM
1023 if (size < 0)
1024 {
1025 end = begin;
1026 covered = btrace_call_prev (&begin, context);
1027 }
1028 else
1029 {
1030 begin = end;
1031 covered = btrace_call_next (&end, context);
1032 }
afedecd3
MM
1033 }
1034
23a7fe75 1035 if (covered > 0)
8710b709 1036 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1037 else
1038 {
1039 if (size < 0)
1040 printf_unfiltered (_("At the start of the branch trace record.\n"));
1041 else
1042 printf_unfiltered (_("At the end of the branch trace record.\n"));
1043 }
afedecd3 1044
23a7fe75 1045 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1046 do_cleanups (uiout_cleanup);
1047}
1048
1049/* The to_call_history_range method of target record-btrace. */
1050
1051static void
f0d960ea
TT
1052record_btrace_call_history_range (struct target_ops *self,
1053 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
1054{
1055 struct btrace_thread_info *btinfo;
23a7fe75
MM
1056 struct btrace_call_history *history;
1057 struct btrace_call_iterator begin, end;
afedecd3
MM
1058 struct cleanup *uiout_cleanup;
1059 struct ui_out *uiout;
23a7fe75
MM
1060 unsigned int low, high;
1061 int found;
afedecd3
MM
1062
1063 uiout = current_uiout;
1064 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1065 "func history");
23a7fe75
MM
1066 low = from;
1067 high = to;
afedecd3 1068
23a7fe75 1069 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
1070
1071 /* Check for wrap-arounds. */
23a7fe75 1072 if (low != from || high != to)
afedecd3
MM
1073 error (_("Bad range."));
1074
0688d04e 1075 if (high < low)
afedecd3
MM
1076 error (_("Bad range."));
1077
23a7fe75 1078 btinfo = require_btrace ();
afedecd3 1079
23a7fe75
MM
1080 found = btrace_find_call_by_number (&begin, btinfo, low);
1081 if (found == 0)
1082 error (_("Range out of bounds."));
afedecd3 1083
23a7fe75
MM
1084 found = btrace_find_call_by_number (&end, btinfo, high);
1085 if (found == 0)
0688d04e
MM
1086 {
1087 /* Silently truncate the range. */
1088 btrace_call_end (&end, btinfo);
1089 }
1090 else
1091 {
1092 /* We want both begin and end to be inclusive. */
1093 btrace_call_next (&end, 1);
1094 }
afedecd3 1095
8710b709 1096 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1097 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1098
1099 do_cleanups (uiout_cleanup);
1100}
1101
1102/* The to_call_history_from method of target record-btrace. */
1103
1104static void
ec0aea04
TT
1105record_btrace_call_history_from (struct target_ops *self,
1106 ULONGEST from, int size, int flags)
afedecd3
MM
1107{
1108 ULONGEST begin, end, context;
1109
1110 context = abs (size);
0688d04e
MM
1111 if (context == 0)
1112 error (_("Bad record function-call-history-size."));
afedecd3
MM
1113
1114 if (size < 0)
1115 {
1116 end = from;
1117
1118 if (from < context)
1119 begin = 0;
1120 else
0688d04e 1121 begin = from - context + 1;
afedecd3
MM
1122 }
1123 else
1124 {
1125 begin = from;
0688d04e 1126 end = from + context - 1;
afedecd3
MM
1127
1128 /* Check for wrap-around. */
1129 if (end < begin)
1130 end = ULONGEST_MAX;
1131 }
1132
f0d960ea 1133 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1134}
1135
07bbe694
MM
1136/* The to_record_is_replaying method of target record-btrace. */
1137
1138static int
a52eab48 1139record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1140{
1141 struct thread_info *tp;
1142
034f788c 1143 ALL_NON_EXITED_THREADS (tp)
a52eab48 1144 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1145 return 1;
1146
1147 return 0;
1148}
1149
633785ff
MM
1150/* The to_xfer_partial method of target record-btrace. */
1151
9b409511 1152static enum target_xfer_status
633785ff
MM
1153record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1154 const char *annex, gdb_byte *readbuf,
1155 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1156 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1157{
1158 struct target_ops *t;
1159
1160 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1161 if (replay_memory_access == replay_memory_access_read_only
aef92902 1162 && !record_btrace_generating_corefile
a52eab48 1163 && record_btrace_is_replaying (ops, minus_one_ptid))
633785ff
MM
1164 {
1165 switch (object)
1166 {
1167 case TARGET_OBJECT_MEMORY:
1168 {
1169 struct target_section *section;
1170
1171 /* We do not allow writing memory in general. */
1172 if (writebuf != NULL)
9b409511
YQ
1173 {
1174 *xfered_len = len;
bc113b4e 1175 return TARGET_XFER_UNAVAILABLE;
9b409511 1176 }
633785ff
MM
1177
1178 /* We allow reading readonly memory. */
1179 section = target_section_by_addr (ops, offset);
1180 if (section != NULL)
1181 {
1182 /* Check if the section we found is readonly. */
1183 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1184 section->the_bfd_section)
1185 & SEC_READONLY) != 0)
1186 {
1187 /* Truncate the request to fit into this section. */
1188 len = min (len, section->endaddr - offset);
1189 break;
1190 }
1191 }
1192
9b409511 1193 *xfered_len = len;
bc113b4e 1194 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1195 }
1196 }
1197 }
1198
1199 /* Forward the request. */
e75fdfca
TT
1200 ops = ops->beneath;
1201 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1202 offset, len, xfered_len);
633785ff
MM
1203}
1204
1205/* The to_insert_breakpoint method of target record-btrace. */
1206
1207static int
1208record_btrace_insert_breakpoint (struct target_ops *ops,
1209 struct gdbarch *gdbarch,
1210 struct bp_target_info *bp_tgt)
1211{
67b5c0c1
MM
1212 const char *old;
1213 int ret;
633785ff
MM
1214
1215 /* Inserting breakpoints requires accessing memory. Allow it for the
1216 duration of this function. */
67b5c0c1
MM
1217 old = replay_memory_access;
1218 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1219
1220 ret = 0;
492d29ea
PA
1221 TRY
1222 {
1223 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1224 }
492d29ea
PA
1225 CATCH (except, RETURN_MASK_ALL)
1226 {
6c63c96a 1227 replay_memory_access = old;
492d29ea
PA
1228 throw_exception (except);
1229 }
1230 END_CATCH
6c63c96a 1231 replay_memory_access = old;
633785ff
MM
1232
1233 return ret;
1234}
1235
1236/* The to_remove_breakpoint method of target record-btrace. */
1237
1238static int
1239record_btrace_remove_breakpoint (struct target_ops *ops,
1240 struct gdbarch *gdbarch,
1241 struct bp_target_info *bp_tgt)
1242{
67b5c0c1
MM
1243 const char *old;
1244 int ret;
633785ff
MM
1245
1246 /* Removing breakpoints requires accessing memory. Allow it for the
1247 duration of this function. */
67b5c0c1
MM
1248 old = replay_memory_access;
1249 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1250
1251 ret = 0;
492d29ea
PA
1252 TRY
1253 {
1254 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1255 }
492d29ea
PA
1256 CATCH (except, RETURN_MASK_ALL)
1257 {
6c63c96a 1258 replay_memory_access = old;
492d29ea
PA
1259 throw_exception (except);
1260 }
1261 END_CATCH
6c63c96a 1262 replay_memory_access = old;
633785ff
MM
1263
1264 return ret;
1265}
1266
1f3ef581
MM
1267/* The to_fetch_registers method of target record-btrace. */
1268
1269static void
1270record_btrace_fetch_registers (struct target_ops *ops,
1271 struct regcache *regcache, int regno)
1272{
1273 struct btrace_insn_iterator *replay;
1274 struct thread_info *tp;
1275
1276 tp = find_thread_ptid (inferior_ptid);
1277 gdb_assert (tp != NULL);
1278
1279 replay = tp->btrace.replay;
aef92902 1280 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1281 {
1282 const struct btrace_insn *insn;
1283 struct gdbarch *gdbarch;
1284 int pcreg;
1285
1286 gdbarch = get_regcache_arch (regcache);
1287 pcreg = gdbarch_pc_regnum (gdbarch);
1288 if (pcreg < 0)
1289 return;
1290
1291 /* We can only provide the PC register. */
1292 if (regno >= 0 && regno != pcreg)
1293 return;
1294
1295 insn = btrace_insn_get (replay);
1296 gdb_assert (insn != NULL);
1297
1298 regcache_raw_supply (regcache, regno, &insn->pc);
1299 }
1300 else
1301 {
e75fdfca 1302 struct target_ops *t = ops->beneath;
1f3ef581 1303
e75fdfca 1304 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1305 }
1306}
1307
1308/* The to_store_registers method of target record-btrace. */
1309
1310static void
1311record_btrace_store_registers (struct target_ops *ops,
1312 struct regcache *regcache, int regno)
1313{
1314 struct target_ops *t;
1315
a52eab48
MM
1316 if (!record_btrace_generating_corefile
1317 && record_btrace_is_replaying (ops, minus_one_ptid))
1f3ef581
MM
1318 error (_("This record target does not allow writing registers."));
1319
1320 gdb_assert (may_write_registers != 0);
1321
e75fdfca
TT
1322 t = ops->beneath;
1323 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1324}
1325
1326/* The to_prepare_to_store method of target record-btrace. */
1327
1328static void
1329record_btrace_prepare_to_store (struct target_ops *ops,
1330 struct regcache *regcache)
1331{
1332 struct target_ops *t;
1333
a52eab48
MM
1334 if (!record_btrace_generating_corefile
1335 && record_btrace_is_replaying (ops, minus_one_ptid))
1f3ef581
MM
1336 return;
1337
e75fdfca
TT
1338 t = ops->beneath;
1339 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1340}
1341
/* The branch trace frame cache.  Associates a frame with the thread and
   branch trace function segment it was built from.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; used as the hash key in BFCACHE.  */
  struct frame_info *frame;

  /* The branch trace function segment backing this frame.  */
  const struct btrace_function *bfun;
};
1355
1356/* A struct btrace_frame_cache hash table indexed by NEXT. */
1357
1358static htab_t bfcache;
1359
1360/* hash_f for htab_create_alloc of bfcache. */
1361
1362static hashval_t
1363bfcache_hash (const void *arg)
1364{
1365 const struct btrace_frame_cache *cache = arg;
1366
1367 return htab_hash_pointer (cache->frame);
1368}
1369
1370/* eq_f for htab_create_alloc of bfcache. */
1371
1372static int
1373bfcache_eq (const void *arg1, const void *arg2)
1374{
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1377
1378 return cache1->frame == cache2->frame;
1379}
1380
1381/* Create a new btrace frame cache. */
1382
1383static struct btrace_frame_cache *
1384bfcache_new (struct frame_info *frame)
1385{
1386 struct btrace_frame_cache *cache;
1387 void **slot;
1388
1389 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1390 cache->frame = frame;
1391
1392 slot = htab_find_slot (bfcache, cache, INSERT);
1393 gdb_assert (*slot == NULL);
1394 *slot = cache;
1395
1396 return cache;
1397}
1398
1399/* Extract the branch trace function from a branch trace frame. */
1400
1401static const struct btrace_function *
1402btrace_get_frame_function (struct frame_info *frame)
1403{
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1407 void **slot;
1408
1409 pattern.frame = frame;
1410
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1412 if (slot == NULL)
1413 return NULL;
1414
1415 cache = *slot;
1416 return cache->bfun;
1417}
1418
cecac1ab
MM
1419/* Implement stop_reason method for record_btrace_frame_unwind. */
1420
1421static enum unwind_stop_reason
1422record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1423 void **this_cache)
1424{
0b722aec
MM
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1427
1428 cache = *this_cache;
1429 bfun = cache->bfun;
1430 gdb_assert (bfun != NULL);
1431
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1434
1435 return UNWIND_NO_REASON;
cecac1ab
MM
1436}
1437
1438/* Implement this_id method for record_btrace_frame_unwind. */
1439
1440static void
1441record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1442 struct frame_id *this_id)
1443{
0b722aec
MM
1444 const struct btrace_frame_cache *cache;
1445 const struct btrace_function *bfun;
1446 CORE_ADDR code, special;
1447
1448 cache = *this_cache;
1449
1450 bfun = cache->bfun;
1451 gdb_assert (bfun != NULL);
1452
1453 while (bfun->segment.prev != NULL)
1454 bfun = bfun->segment.prev;
1455
1456 code = get_frame_func (this_frame);
1457 special = bfun->number;
1458
1459 *this_id = frame_id_build_unavailable_stack_special (code, special);
1460
1461 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1462 btrace_get_bfun_name (cache->bfun),
1463 core_addr_to_string_nz (this_id->code_addr),
1464 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1465}
1466
1467/* Implement prev_register method for record_btrace_frame_unwind. */
1468
1469static struct value *
1470record_btrace_frame_prev_register (struct frame_info *this_frame,
1471 void **this_cache,
1472 int regnum)
1473{
0b722aec
MM
1474 const struct btrace_frame_cache *cache;
1475 const struct btrace_function *bfun, *caller;
1476 const struct btrace_insn *insn;
1477 struct gdbarch *gdbarch;
1478 CORE_ADDR pc;
1479 int pcreg;
1480
1481 gdbarch = get_frame_arch (this_frame);
1482 pcreg = gdbarch_pc_regnum (gdbarch);
1483 if (pcreg < 0 || regnum != pcreg)
1484 throw_error (NOT_AVAILABLE_ERROR,
1485 _("Registers are not available in btrace record history"));
1486
1487 cache = *this_cache;
1488 bfun = cache->bfun;
1489 gdb_assert (bfun != NULL);
1490
1491 caller = bfun->up;
1492 if (caller == NULL)
1493 throw_error (NOT_AVAILABLE_ERROR,
1494 _("No caller in btrace record history"));
1495
1496 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1497 {
1498 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1499 pc = insn->pc;
1500 }
1501 else
1502 {
1503 insn = VEC_last (btrace_insn_s, caller->insn);
1504 pc = insn->pc;
1505
1506 pc += gdb_insn_length (gdbarch, pc);
1507 }
1508
1509 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1510 btrace_get_bfun_name (bfun), bfun->level,
1511 core_addr_to_string_nz (pc));
1512
1513 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1514}
1515
1516/* Implement sniffer method for record_btrace_frame_unwind. */
1517
1518static int
1519record_btrace_frame_sniffer (const struct frame_unwind *self,
1520 struct frame_info *this_frame,
1521 void **this_cache)
1522{
0b722aec
MM
1523 const struct btrace_function *bfun;
1524 struct btrace_frame_cache *cache;
cecac1ab 1525 struct thread_info *tp;
0b722aec 1526 struct frame_info *next;
cecac1ab
MM
1527
1528 /* THIS_FRAME does not contain a reference to its thread. */
1529 tp = find_thread_ptid (inferior_ptid);
1530 gdb_assert (tp != NULL);
1531
0b722aec
MM
1532 bfun = NULL;
1533 next = get_next_frame (this_frame);
1534 if (next == NULL)
1535 {
1536 const struct btrace_insn_iterator *replay;
1537
1538 replay = tp->btrace.replay;
1539 if (replay != NULL)
1540 bfun = replay->function;
1541 }
1542 else
1543 {
1544 const struct btrace_function *callee;
1545
1546 callee = btrace_get_frame_function (next);
1547 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1548 bfun = callee->up;
1549 }
1550
1551 if (bfun == NULL)
1552 return 0;
1553
1554 DEBUG ("[frame] sniffed frame for %s on level %d",
1555 btrace_get_bfun_name (bfun), bfun->level);
1556
1557 /* This is our frame. Initialize the frame cache. */
1558 cache = bfcache_new (this_frame);
1559 cache->tp = tp;
1560 cache->bfun = bfun;
1561
1562 *this_cache = cache;
1563 return 1;
1564}
1565
1566/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1567
1568static int
1569record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1570 struct frame_info *this_frame,
1571 void **this_cache)
1572{
1573 const struct btrace_function *bfun, *callee;
1574 struct btrace_frame_cache *cache;
1575 struct frame_info *next;
1576
1577 next = get_next_frame (this_frame);
1578 if (next == NULL)
1579 return 0;
1580
1581 callee = btrace_get_frame_function (next);
1582 if (callee == NULL)
1583 return 0;
1584
1585 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1586 return 0;
1587
1588 bfun = callee->up;
1589 if (bfun == NULL)
1590 return 0;
1591
1592 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1593 btrace_get_bfun_name (bfun), bfun->level);
1594
1595 /* This is our frame. Initialize the frame cache. */
1596 cache = bfcache_new (this_frame);
1597 cache->tp = find_thread_ptid (inferior_ptid);
1598 cache->bfun = bfun;
1599
1600 *this_cache = cache;
1601 return 1;
1602}
1603
1604static void
1605record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1606{
1607 struct btrace_frame_cache *cache;
1608 void **slot;
1609
1610 cache = this_cache;
1611
1612 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1613 gdb_assert (slot != NULL);
1614
1615 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1616}
1617
1618/* btrace recording does not store previous memory content, neither the stack
1619 frames content. Any unwinding would return errorneous results as the stack
1620 contents no longer matches the changed PC value restored from history.
1621 Therefore this unwinder reports any possibly unwound registers as
1622 <unavailable>. */
1623
0b722aec 1624const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1625{
1626 NORMAL_FRAME,
1627 record_btrace_frame_unwind_stop_reason,
1628 record_btrace_frame_this_id,
1629 record_btrace_frame_prev_register,
1630 NULL,
0b722aec
MM
1631 record_btrace_frame_sniffer,
1632 record_btrace_frame_dealloc_cache
1633};
1634
1635const struct frame_unwind record_btrace_tailcall_frame_unwind =
1636{
1637 TAILCALL_FRAME,
1638 record_btrace_frame_unwind_stop_reason,
1639 record_btrace_frame_this_id,
1640 record_btrace_frame_prev_register,
1641 NULL,
1642 record_btrace_tailcall_frame_sniffer,
1643 record_btrace_frame_dealloc_cache
cecac1ab 1644};
b2f4cfde 1645
ac01945b
TT
1646/* Implement the to_get_unwinder method. */
1647
1648static const struct frame_unwind *
1649record_btrace_to_get_unwinder (struct target_ops *self)
1650{
1651 return &record_btrace_frame_unwind;
1652}
1653
1654/* Implement the to_get_tailcall_unwinder method. */
1655
1656static const struct frame_unwind *
1657record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1658{
1659 return &record_btrace_tailcall_frame_unwind;
1660}
1661
987e68b1
MM
1662/* Return a human-readable string for FLAG. */
1663
1664static const char *
1665btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1666{
1667 switch (flag)
1668 {
1669 case BTHR_STEP:
1670 return "step";
1671
1672 case BTHR_RSTEP:
1673 return "reverse-step";
1674
1675 case BTHR_CONT:
1676 return "cont";
1677
1678 case BTHR_RCONT:
1679 return "reverse-cont";
1680
1681 case BTHR_STOP:
1682 return "stop";
1683 }
1684
1685 return "<invalid>";
1686}
1687
52834460
MM
1688/* Indicate that TP should be resumed according to FLAG. */
1689
1690static void
1691record_btrace_resume_thread (struct thread_info *tp,
1692 enum btrace_thread_flag flag)
1693{
1694 struct btrace_thread_info *btinfo;
1695
987e68b1
MM
1696 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1697 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1698
1699 btinfo = &tp->btrace;
1700
52834460
MM
1701 /* Fetch the latest branch trace. */
1702 btrace_fetch (tp);
1703
0ca912df
MM
1704 /* A resume request overwrites a preceding resume or stop request. */
1705 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1706 btinfo->flags |= flag;
1707}
1708
ec71cc2f
MM
1709/* Get the current frame for TP. */
1710
1711static struct frame_info *
1712get_thread_current_frame (struct thread_info *tp)
1713{
1714 struct frame_info *frame;
1715 ptid_t old_inferior_ptid;
1716 int executing;
1717
1718 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1719 old_inferior_ptid = inferior_ptid;
1720 inferior_ptid = tp->ptid;
1721
1722 /* Clear the executing flag to allow changes to the current frame.
1723 We are not actually running, yet. We just started a reverse execution
1724 command or a record goto command.
1725 For the latter, EXECUTING is false and this has no effect.
1726 For the former, EXECUTING is true and we're in to_wait, about to
1727 move the thread. Since we need to recompute the stack, we temporarily
1728 set EXECUTING to flase. */
1729 executing = is_executing (inferior_ptid);
1730 set_executing (inferior_ptid, 0);
1731
1732 frame = NULL;
1733 TRY
1734 {
1735 frame = get_current_frame ();
1736 }
1737 CATCH (except, RETURN_MASK_ALL)
1738 {
1739 /* Restore the previous execution state. */
1740 set_executing (inferior_ptid, executing);
1741
1742 /* Restore the previous inferior_ptid. */
1743 inferior_ptid = old_inferior_ptid;
1744
1745 throw_exception (except);
1746 }
1747 END_CATCH
1748
1749 /* Restore the previous execution state. */
1750 set_executing (inferior_ptid, executing);
1751
1752 /* Restore the previous inferior_ptid. */
1753 inferior_ptid = old_inferior_ptid;
1754
1755 return frame;
1756}
1757
52834460
MM
1758/* Start replaying a thread. */
1759
1760static struct btrace_insn_iterator *
1761record_btrace_start_replaying (struct thread_info *tp)
1762{
52834460
MM
1763 struct btrace_insn_iterator *replay;
1764 struct btrace_thread_info *btinfo;
52834460
MM
1765
1766 btinfo = &tp->btrace;
1767 replay = NULL;
1768
1769 /* We can't start replaying without trace. */
1770 if (btinfo->begin == NULL)
1771 return NULL;
1772
52834460
MM
1773 /* GDB stores the current frame_id when stepping in order to detects steps
1774 into subroutines.
1775 Since frames are computed differently when we're replaying, we need to
1776 recompute those stored frames and fix them up so we can still detect
1777 subroutines after we started replaying. */
492d29ea 1778 TRY
52834460
MM
1779 {
1780 struct frame_info *frame;
1781 struct frame_id frame_id;
1782 int upd_step_frame_id, upd_step_stack_frame_id;
1783
1784 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1785 frame = get_thread_current_frame (tp);
52834460
MM
1786 frame_id = get_frame_id (frame);
1787
1788 /* Check if we need to update any stepping-related frame id's. */
1789 upd_step_frame_id = frame_id_eq (frame_id,
1790 tp->control.step_frame_id);
1791 upd_step_stack_frame_id = frame_id_eq (frame_id,
1792 tp->control.step_stack_frame_id);
1793
1794 /* We start replaying at the end of the branch trace. This corresponds
1795 to the current instruction. */
8d749320 1796 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1797 btrace_insn_end (replay, btinfo);
1798
31fd9caa
MM
1799 /* Skip gaps at the end of the trace. */
1800 while (btrace_insn_get (replay) == NULL)
1801 {
1802 unsigned int steps;
1803
1804 steps = btrace_insn_prev (replay, 1);
1805 if (steps == 0)
1806 error (_("No trace."));
1807 }
1808
52834460
MM
1809 /* We're not replaying, yet. */
1810 gdb_assert (btinfo->replay == NULL);
1811 btinfo->replay = replay;
1812
1813 /* Make sure we're not using any stale registers. */
1814 registers_changed_ptid (tp->ptid);
1815
1816 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1817 frame = get_thread_current_frame (tp);
52834460
MM
1818 frame_id = get_frame_id (frame);
1819
1820 /* Replace stepping related frames where necessary. */
1821 if (upd_step_frame_id)
1822 tp->control.step_frame_id = frame_id;
1823 if (upd_step_stack_frame_id)
1824 tp->control.step_stack_frame_id = frame_id;
1825 }
492d29ea 1826 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1827 {
1828 xfree (btinfo->replay);
1829 btinfo->replay = NULL;
1830
1831 registers_changed_ptid (tp->ptid);
1832
1833 throw_exception (except);
1834 }
492d29ea 1835 END_CATCH
52834460
MM
1836
1837 return replay;
1838}
1839
1840/* Stop replaying a thread. */
1841
1842static void
1843record_btrace_stop_replaying (struct thread_info *tp)
1844{
1845 struct btrace_thread_info *btinfo;
1846
1847 btinfo = &tp->btrace;
1848
1849 xfree (btinfo->replay);
1850 btinfo->replay = NULL;
1851
1852 /* Make sure we're not leaving any stale registers. */
1853 registers_changed_ptid (tp->ptid);
1854}
1855
e3cfc1c7
MM
1856/* Stop replaying TP if it is at the end of its execution history. */
1857
1858static void
1859record_btrace_stop_replaying_at_end (struct thread_info *tp)
1860{
1861 struct btrace_insn_iterator *replay, end;
1862 struct btrace_thread_info *btinfo;
1863
1864 btinfo = &tp->btrace;
1865 replay = btinfo->replay;
1866
1867 if (replay == NULL)
1868 return;
1869
1870 btrace_insn_end (&end, btinfo);
1871
1872 if (btrace_insn_cmp (replay, &end) == 0)
1873 record_btrace_stop_replaying (tp);
1874}
1875
b2f4cfde
MM
1876/* The to_resume method of target record-btrace. */
1877
1878static void
1879record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1880 enum gdb_signal signal)
1881{
0ca912df 1882 struct thread_info *tp;
52834460 1883 enum btrace_thread_flag flag;
0ca912df 1884 ptid_t orig_ptid;
52834460 1885
987e68b1
MM
1886 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1887 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1888 step ? "step" : "cont");
52834460 1889
0ca912df
MM
1890 orig_ptid = ptid;
1891
1892 /* Store the execution direction of the last resume.
1893
1894 If there is more than one to_resume call, we have to rely on infrun
1895 to not change the execution direction in-between. */
70ad5bff
MM
1896 record_btrace_resume_exec_dir = execution_direction;
1897
0ca912df
MM
1898 /* For all-stop targets... */
1899 if (!target_is_non_stop_p ())
1900 {
1901 /* ...we pick the current thread when asked to resume an entire process
1902 or everything. */
1903 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1904 ptid = inferior_ptid;
1905
1906 tp = find_thread_ptid (ptid);
1907 if (tp == NULL)
1908 error (_("Cannot find thread to resume."));
1909
1910 /* ...and we stop replaying other threads if the thread to resume is not
1911 replaying. */
1912 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1913 ALL_NON_EXITED_THREADS (tp)
1914 record_btrace_stop_replaying (tp);
1915 }
52834460 1916
0ca912df 1917 /* As long as we're not replaying, just forward the request.
52834460 1918
0ca912df
MM
1919 For non-stop targets this means that no thread is replaying. In order to
1920 make progress, we may need to explicitly move replaying threads to the end
1921 of their execution history. */
a52eab48
MM
1922 if ((execution_direction != EXEC_REVERSE)
1923 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 1924 {
e75fdfca 1925 ops = ops->beneath;
0ca912df 1926 return ops->to_resume (ops, orig_ptid, step, signal);
b2f4cfde
MM
1927 }
1928
52834460
MM
1929 /* Compute the btrace thread flag for the requested move. */
1930 if (step == 0)
1931 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1932 else
1933 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1934
52834460
MM
1935 /* We just indicate the resume intent here. The actual stepping happens in
1936 record_btrace_wait below. */
0ca912df
MM
1937 ALL_NON_EXITED_THREADS (tp)
1938 if (ptid_match (tp->ptid, ptid))
1939 record_btrace_resume_thread (tp, flag);
70ad5bff
MM
1940
1941 /* Async support. */
1942 if (target_can_async_p ())
1943 {
6a3753b3 1944 target_async (1);
70ad5bff
MM
1945 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1946 }
52834460
MM
1947}
1948
987e68b1
MM
1949/* Cancel resuming TP. */
1950
1951static void
1952record_btrace_cancel_resume (struct thread_info *tp)
1953{
1954 enum btrace_thread_flag flags;
1955
1956 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1957 if (flags == 0)
1958 return;
1959
1960 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1961 target_pid_to_str (tp->ptid), flags,
1962 btrace_thread_flag_to_str (flags));
1963
1964 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 1965 record_btrace_stop_replaying_at_end (tp);
52834460
MM
1966}
1967
1968/* Return a target_waitstatus indicating that we ran out of history. */
1969
1970static struct target_waitstatus
1971btrace_step_no_history (void)
1972{
1973 struct target_waitstatus status;
1974
1975 status.kind = TARGET_WAITKIND_NO_HISTORY;
1976
1977 return status;
1978}
1979
1980/* Return a target_waitstatus indicating that a step finished. */
1981
1982static struct target_waitstatus
1983btrace_step_stopped (void)
1984{
1985 struct target_waitstatus status;
1986
1987 status.kind = TARGET_WAITKIND_STOPPED;
1988 status.value.sig = GDB_SIGNAL_TRAP;
1989
1990 return status;
1991}
1992
6e4879f0
MM
1993/* Return a target_waitstatus indicating that a thread was stopped as
1994 requested. */
1995
1996static struct target_waitstatus
1997btrace_step_stopped_on_request (void)
1998{
1999 struct target_waitstatus status;
2000
2001 status.kind = TARGET_WAITKIND_STOPPED;
2002 status.value.sig = GDB_SIGNAL_0;
2003
2004 return status;
2005}
2006
d825d248
MM
2007/* Return a target_waitstatus indicating a spurious stop. */
2008
2009static struct target_waitstatus
2010btrace_step_spurious (void)
2011{
2012 struct target_waitstatus status;
2013
2014 status.kind = TARGET_WAITKIND_SPURIOUS;
2015
2016 return status;
2017}
2018
e3cfc1c7
MM
2019/* Return a target_waitstatus indicating that the thread was not resumed. */
2020
2021static struct target_waitstatus
2022btrace_step_no_resumed (void)
2023{
2024 struct target_waitstatus status;
2025
2026 status.kind = TARGET_WAITKIND_NO_RESUMED;
2027
2028 return status;
2029}
2030
2031/* Return a target_waitstatus indicating that we should wait again. */
2032
2033static struct target_waitstatus
2034btrace_step_again (void)
2035{
2036 struct target_waitstatus status;
2037
2038 status.kind = TARGET_WAITKIND_IGNORE;
2039
2040 return status;
2041}
2042
52834460
MM
2043/* Clear the record histories. */
2044
2045static void
2046record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2047{
2048 xfree (btinfo->insn_history);
2049 xfree (btinfo->call_history);
2050
2051 btinfo->insn_history = NULL;
2052 btinfo->call_history = NULL;
2053}
2054
3c615f99
MM
2055/* Check whether TP's current replay position is at a breakpoint. */
2056
2057static int
2058record_btrace_replay_at_breakpoint (struct thread_info *tp)
2059{
2060 struct btrace_insn_iterator *replay;
2061 struct btrace_thread_info *btinfo;
2062 const struct btrace_insn *insn;
2063 struct inferior *inf;
2064
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2067
2068 if (replay == NULL)
2069 return 0;
2070
2071 insn = btrace_insn_get (replay);
2072 if (insn == NULL)
2073 return 0;
2074
2075 inf = find_inferior_ptid (tp->ptid);
2076 if (inf == NULL)
2077 return 0;
2078
2079 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2080 &btinfo->stop_reason);
2081}
2082
d825d248 2083/* Step one instruction in forward direction. */
52834460
MM
2084
2085static struct target_waitstatus
d825d248 2086record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2087{
2088 struct btrace_insn_iterator *replay, end;
2089 struct btrace_thread_info *btinfo;
52834460 2090
d825d248
MM
2091 btinfo = &tp->btrace;
2092 replay = btinfo->replay;
2093
2094 /* We're done if we're not replaying. */
2095 if (replay == NULL)
2096 return btrace_step_no_history ();
2097
011c71b6
MM
2098 /* Check if we're stepping a breakpoint. */
2099 if (record_btrace_replay_at_breakpoint (tp))
2100 return btrace_step_stopped ();
2101
d825d248
MM
2102 /* Skip gaps during replay. */
2103 do
2104 {
2105 unsigned int steps;
2106
e3cfc1c7
MM
2107 /* We will bail out here if we continue stepping after reaching the end
2108 of the execution history. */
d825d248
MM
2109 steps = btrace_insn_next (replay, 1);
2110 if (steps == 0)
e3cfc1c7 2111 return btrace_step_no_history ();
d825d248
MM
2112 }
2113 while (btrace_insn_get (replay) == NULL);
2114
2115 /* Determine the end of the instruction trace. */
2116 btrace_insn_end (&end, btinfo);
2117
e3cfc1c7
MM
2118 /* The execution trace contains (and ends with) the current instruction.
2119 This instruction has not been executed, yet, so the trace really ends
2120 one instruction earlier. */
d825d248 2121 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2122 return btrace_step_no_history ();
d825d248
MM
2123
2124 return btrace_step_spurious ();
2125}
2126
2127/* Step one instruction in backward direction. */
2128
2129static struct target_waitstatus
2130record_btrace_single_step_backward (struct thread_info *tp)
2131{
2132 struct btrace_insn_iterator *replay;
2133 struct btrace_thread_info *btinfo;
e59fa00f 2134
52834460
MM
2135 btinfo = &tp->btrace;
2136 replay = btinfo->replay;
2137
d825d248
MM
2138 /* Start replaying if we're not already doing so. */
2139 if (replay == NULL)
2140 replay = record_btrace_start_replaying (tp);
2141
2142 /* If we can't step any further, we reached the end of the history.
2143 Skip gaps during replay. */
2144 do
2145 {
2146 unsigned int steps;
2147
2148 steps = btrace_insn_prev (replay, 1);
2149 if (steps == 0)
2150 return btrace_step_no_history ();
2151 }
2152 while (btrace_insn_get (replay) == NULL);
2153
011c71b6
MM
2154 /* Check if we're stepping a breakpoint.
2155
2156 For reverse-stepping, this check is after the step. There is logic in
2157 infrun.c that handles reverse-stepping separately. See, for example,
2158 proceed and adjust_pc_after_break.
2159
2160 This code assumes that for reverse-stepping, PC points to the last
2161 de-executed instruction, whereas for forward-stepping PC points to the
2162 next to-be-executed instruction. */
2163 if (record_btrace_replay_at_breakpoint (tp))
2164 return btrace_step_stopped ();
2165
d825d248
MM
2166 return btrace_step_spurious ();
2167}
2168
/* Step a single thread.

   Consumes TP's pending move/stop request (BTHR_MOVE | BTHR_STOP bits in
   tp->btrace.flags) and performs one stepping iteration.  Returns the wait
   status to report for TP.  For continue requests that make progress, the
   request is re-armed and btrace_step_again is returned so the caller keeps
   iterating.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request; only re-set below if the thread should
     keep moving.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A single forward step; any non-spurious event ends the step.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* A single backward step; any non-spurious event ends the step.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep the continue request pending and ask to be stepped again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep the continue request pending and ask to be stepped again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2237
e3cfc1c7
MM
/* A vector of threads.  Used by record_btrace_wait to keep work lists of
   moving and end-of-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2242
a6b5be76
MM
2243/* Announce further events if necessary. */
2244
2245static void
2246record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2247 const VEC (tp_t) *no_history)
2248{
2249 int more_moving, more_no_history;
2250
2251 more_moving = !VEC_empty (tp_t, moving);
2252 more_no_history = !VEC_empty (tp_t, no_history);
2253
2254 if (!more_moving && !more_no_history)
2255 return;
2256
2257 if (more_moving)
2258 DEBUG ("movers pending");
2259
2260 if (more_no_history)
2261 DEBUG ("no-history pending");
2262
2263 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2264}
2265
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   While replaying (or executing in reverse), step all threads matching PTID
   one instruction at a time until one of them reports an event; otherwise
   forward the request to the target beneath.  Returns the ptid of the
   eventing thread and fills in *STATUS.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move/stop request.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the no-history list; reported last.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2399
6e4879f0
MM
/* The to_stop method of target record-btrace.  */

static void
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_stop (ops, ptid);
    }
  else
    {
      struct thread_info *tp;

      /* While replaying, convert the stop request into per-thread flags:
	 clear any pending move request and set BTHR_STOP, which
	 record_btrace_step_thread reports as a stop on request.  */
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    tp->btrace.flags &= ~BTHR_MOVE;
	    tp->btrace.flags |= BTHR_STOP;
	  }
    }
 }
2426
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying the recorded execution history always supports reverse
     execution.  */
  return 1;
}
2434
9e8915c6 2435/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2436
9e8915c6
PA
2437static int
2438record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2439{
a52eab48 2440 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2441 {
2442 struct thread_info *tp = inferior_thread ();
2443
2444 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2445 }
2446
2447 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2448}
2449
/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  /* While replaying, we report sw breakpoint stop reasons ourselves.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
}
2461
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw" - a copy-paste slip; this is the
   hardware-breakpoint variant.)  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, report the stop reason recorded for the current
	 thread rather than asking the target beneath.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2476
/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  /* While replaying, we report hw breakpoint stop reasons ourselves.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
}
2488
e8032dde 2489/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2490
2491static void
e8032dde 2492record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2493{
e8032dde 2494 /* We don't add or remove threads during replay. */
a52eab48 2495 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2496 return;
2497
2498 /* Forward the request. */
e75fdfca 2499 ops = ops->beneath;
e8032dde 2500 ops->to_update_thread_list (ops);
e2887aa3
MM
2501}
2502
/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay, so any thread in our
     list is considered alive.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}
2516
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Side effects: may start or stop replaying TP, invalidates TP's cached
   registers, clears the record histories, and prints the new current
   frame.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position - nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2547
/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  /* Move the replay position to the first recorded instruction.  */
  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);
}
2561
/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator stops replaying, i.e. moves to the end of the
     recorded history.  */
  record_btrace_set_replay (tp, NULL);
}
2573
/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is narrowed to unsigned int above; if the
     round-trip does not compare equal, the number does not fit.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2598
70ad5bff
MM
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction recorded by the last resume request.  */
  return record_btrace_resume_exec_dir;
}
2606
aef92902
MM
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* Flag core generation so other methods can forward requests to the
     target beneath while the core file is written.  */
  record_btrace_generating_corefile = 1;
}
2614
/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Core generation finished; resume normal operation.  */
  record_btrace_generating_corefile = 0;
}
2622
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  /* Generic record-target methods.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Replay-aware memory, breakpoint and register access.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2679
f4abbc16
MM
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection before re-throwing, so a failed attempt
	 leaves no stale configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2701
b20a6524 2702/* Start recording Intel(R) Processor Trace. */
afedecd3
MM
2703
2704static void
b20a6524 2705cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2706{
2707 if (args != NULL && *args != 0)
2708 error (_("Invalid argument."));
2709
b20a6524 2710 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2711
492d29ea
PA
2712 TRY
2713 {
2714 execute_command ("target record-btrace", from_tty);
2715 }
2716 CATCH (exception, RETURN_MASK_ALL)
2717 {
2718 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2719 throw_exception (exception);
2720 }
2721 END_CATCH
afedecd3
MM
2722}
2723
b20a6524
MM
/* Alias for "target record".

   Try the PT format first; if starting the target fails, fall back to BTS.
   If that fails too, reset the format and re-throw.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT is not available; fall back to BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2755
67b5c0c1
MM
/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  /* A bare "set record btrace" delegates to the sub-command list.  */
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2763
/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  /* A bare "show record btrace" delegates to the sub-command list.  */
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2771
2772/* The "show record btrace replay-memory-access" command. */
2773
2774static void
2775cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2776 struct cmd_list_element *c, const char *value)
2777{
2778 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2779 replay_memory_access);
2780}
2781
d33501a5
MM
/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  /* This prefix command takes no arguments of its own; print usage and
     list the available sub-commands.  */
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2792
/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  /* Delegate to the bts show sub-command list.  */
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2800
b20a6524
MM
/* The "set record btrace pt" command.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  /* This prefix command takes no arguments of its own; print usage and
     list the available sub-commands.  */
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2811
/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  /* Delegate to the pt show sub-command list.  */
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2819
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2830
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2841
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family (bts/pt sub-commands, the
   set/show option trees), installs the record-btrace target, creates the
   branch-function cache, and sets the default buffer sizes.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for branch-function objects used by the call-history code.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.425585 seconds and 4 git commands to generate.