btrace: resume all requested threads
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
f4abbc16
MM
74/* The current branch trace configuration. */
75static struct btrace_config record_btrace_conf;
76
77/* Command list for "record btrace". */
78static struct cmd_list_element *record_btrace_cmdlist;
79
d33501a5
MM
80/* Command lists for "set/show record btrace bts". */
81static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
b20a6524
MM
84/* Command lists for "set/show record btrace pt". */
85static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
88/* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91#define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
100
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  /* Branch tracing is recorded per thread; we need a selected thread.  */
  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Bring the thread's trace up to date before checking for emptiness.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Enabling tracing can fail (e.g. if the requested recording format is
     not supported).  Demote the error to a warning so that new-thread
     notification is not disturbed.  */
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
157
/* Callback function to disable branch tracing for one thread.
   ARG is the struct thread_info to operate on.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
169
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Attach an observer so branch tracing is enabled (with a warning on
     failure, see record_btrace_enable_warn) for every new thread.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
180
/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  /* Clear the handle so a second call becomes a no-op.  */
  record_btrace_thread_observer = NULL;
}
195
70ad5bff
MM
/* The record-btrace async event handler function.
   DATA is not used; we simply dispatch a regular inferior event.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
203
afedecd3
MM
/* The to_open method of target record-btrace.
   ARGS optionally lists the threads to trace; an empty list means all.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing for each (matching) thread, registering a cleanup per
     thread so everything is disabled again if a later enable fails.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* All threads enabled successfully; keep tracing active.  */
  discard_cleanups (disable_chain);
}
246
247/* The to_stop_recording method of target record-btrace. */
248
249static void
c6cd7c02 250record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
251{
252 struct thread_info *tp;
253
254 DEBUG ("stop recording");
255
256 record_btrace_auto_disable ();
257
034f788c 258 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
259 if (tp->btrace.target != NULL)
260 btrace_disable (tp);
261}
262
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Delete the async event handler created in record_btrace_open.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
282
b7d2e916
PA
/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  /* Toggle our own async event handler to match the requested state.  */
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  /* Forward the request to the target beneath us.  */
  ops->beneath->to_async (ops->beneath, enable);
}
295
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.
   *SIZE is divided by the largest binary unit (GB, MB, kB) that divides
   it evenly; the matching suffix is returned ("" if none applies).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }

  if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }

  if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
323
324/* Print a BTS configuration. */
325
326static void
327record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
328{
329 const char *suffix;
330 unsigned int size;
331
332 size = conf->size;
333 if (size > 0)
334 {
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
337 }
338}
339
b20a6524
MM
340/* Print an Intel(R) Processor Trace configuration. */
341
342static void
343record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
344{
345 const char *suffix;
346 unsigned int size;
347
348 size = conf->size;
349 if (size > 0)
350 {
351 suffix = record_btrace_adjust_size (&size);
352 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
353 }
354}
355
d33501a5
MM
356/* Print a branch tracing configuration. */
357
358static void
359record_btrace_print_conf (const struct btrace_config *conf)
360{
361 printf_unfiltered (_("Recording format: %s.\n"),
362 btrace_format_string (conf->format));
363
364 switch (conf->format)
365 {
366 case BTRACE_FORMAT_NONE:
367 return;
368
369 case BTRACE_FORMAT_BTS:
370 record_btrace_print_bts_conf (&conf->bts);
371 return;
b20a6524
MM
372
373 case BTRACE_FORMAT_PT:
374 record_btrace_print_pt_conf (&conf->pt);
375 return;
d33501a5
MM
376 }
377
378 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
379}
380
afedecd3
MM
/* The to_info_record method of target record-btrace.
   Prints the recording configuration and a summary of the recorded
   trace (instruction count, function count, gaps, replay position).  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count calls by looking at the number of the last call segment.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end; a zero instruction number indicates a
	     gap, so step backwards until we find a numbered instruction
	     or run out of trace.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
453
31fd9caa
MM
/* Print a decode error.  ERRCODE is interpreted according to FORMAT;
   some Intel PT conditions (user quit, disabled, overflow) are reported
   as plain informational notes rather than decode errors.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  /* Map the format-specific error code to a message string.  */
  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes are libipt errors; ask libipt for the text.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
525
afedecd3
MM
/* Print an unsigned int.  Emits VAL into field FLD of UIOUT using an
   unsigned decimal format.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
533
/* Disassemble a section of the recorded instruction trace.
   Iterates over [BEGIN, END) and prints, per instruction: index,
   prefix (pc marker and/or speculation marker), and disassembly.
   Gaps in the trace are reported as decode errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  char prefix[4];

	  /* We may add a speculation prefix later.  We use the same space
	     that is used for the pc prefix.  */
	  if ((flags & DISASSEMBLY_OMIT_PC) == 0)
	    strncpy (prefix, pc_prefix (insn->pc), 3);
	  else
	    {
	      prefix[0] = ' ';
	      prefix[1] = ' ';
	      prefix[2] = ' ';
	    }
	  /* strncpy above may not NUL-terminate; do it explicitly.  */
	  prefix[3] = 0;

	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Indicate speculative execution by a leading '?'.  */
	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    prefix[0] = '?';

	  /* Print the prefix; we tell gdb_disassembly below to omit it.  */
	  ui_out_field_fmt (uiout, "prefix", "%s", prefix);

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
			   1, insn->pc, insn->pc + 1);
	}
    }
}
603
/* The to_insn_history method of target record-btrace.
   Prints abs (SIZE) instructions; negative SIZE means backwards.
   Successive invocations continue from the previously printed range.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed range in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to print in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
689
/* The to_insn_history_range method of target record-btrace.
   Prints instructions [FROM, TO], both inclusive; an out-of-bounds TO
   is silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* Instruction numbers are unsigned int; narrow the arguments.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds caused by the narrowing above.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed range for subsequent history requests.  */
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
742
743/* The to_insn_history_from method of target record-btrace. */
744
745static void
9abc3ff3
TT
746record_btrace_insn_history_from (struct target_ops *self,
747 ULONGEST from, int size, int flags)
afedecd3
MM
748{
749 ULONGEST begin, end, context;
750
751 context = abs (size);
0688d04e
MM
752 if (context == 0)
753 error (_("Bad record instruction-history-size."));
afedecd3
MM
754
755 if (size < 0)
756 {
757 end = from;
758
759 if (from < context)
760 begin = 0;
761 else
0688d04e 762 begin = from - context + 1;
afedecd3
MM
763 }
764 else
765 {
766 begin = from;
0688d04e 767 end = from + context - 1;
afedecd3
MM
768
769 /* Check for wrap-around. */
770 if (end < begin)
771 end = ULONGEST_MAX;
772 }
773
4e99c6b7 774 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
775}
776
/* Print the instruction number range for a function call history line.
   Instruction numbers are global over the whole trace; this segment
   covers [insn_offset, insn_offset + size - 1].  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  /* Each function segment holds at least one instruction.  */
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
795
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Start with an empty range (begin > end); callers treat that as
     "no line information found".  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macro expansion) and
	 unknown lines.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
836
afedecd3
MM
/* Print the source line information for a function call history line.
   Prints "file:line" or "file:min,max" depending on the line range.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* An empty range (end < begin) means no line info was found.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Only print the upper bound if the range spans multiple lines.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
866
0b722aec
MM
867/* Get the name of a branch trace function. */
868
869static const char *
870btrace_get_bfun_name (const struct btrace_function *bfun)
871{
872 struct minimal_symbol *msym;
873 struct symbol *sym;
874
875 if (bfun == NULL)
876 return "??";
877
878 msym = bfun->msym;
879 sym = bfun->sym;
880
881 if (sym != NULL)
882 return SYMBOL_PRINT_NAME (sym);
883 else if (msym != NULL)
efd66ac6 884 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
885 else
886 return "??";
887}
888
afedecd3
MM
/* Disassemble a section of the recorded function trace.
   For each call segment in [BEGIN, END): prints the index, optional
   call-depth indentation, the function name, and optionally the
   instruction range and source line info, as selected by FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth to visualize the call structure.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Prefer the debug symbol name over the minimal symbol name.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
962
/* The to_call_history method of target record-btrace.
   Prints abs (SIZE) function call segments; negative SIZE means
   backwards.  Successive invocations continue from the previously
   printed range.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  /* Build a call iterator from the replay insn iterator.  */
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed range in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to print in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1051
/* The to_call_history_range method of target record-btrace.
   Prints call segments [FROM, TO], both inclusive; an out-of-bounds TO
   is silently truncated to the end of the trace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  /* Call numbers are unsigned int; narrow the arguments.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds caused by the narrowing above.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed range for subsequent history requests.  */
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1104
1105/* The to_call_history_from method of target record-btrace. */
1106
1107static void
ec0aea04
TT
1108record_btrace_call_history_from (struct target_ops *self,
1109 ULONGEST from, int size, int flags)
afedecd3
MM
1110{
1111 ULONGEST begin, end, context;
1112
1113 context = abs (size);
0688d04e
MM
1114 if (context == 0)
1115 error (_("Bad record function-call-history-size."));
afedecd3
MM
1116
1117 if (size < 0)
1118 {
1119 end = from;
1120
1121 if (from < context)
1122 begin = 0;
1123 else
0688d04e 1124 begin = from - context + 1;
afedecd3
MM
1125 }
1126 else
1127 {
1128 begin = from;
0688d04e 1129 end = from + context - 1;
afedecd3
MM
1130
1131 /* Check for wrap-around. */
1132 if (end < begin)
1133 end = ULONGEST_MAX;
1134 }
1135
f0d960ea 1136 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1137}
1138
/* The to_record_is_replaying method of target record-btrace.

   Return non-zero if any non-exited thread is currently replaying its
   execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
1152
633785ff
MM
1153/* The to_xfer_partial method of target record-btrace. */
1154
9b409511 1155static enum target_xfer_status
633785ff
MM
1156record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1157 const char *annex, gdb_byte *readbuf,
1158 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1159 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1160{
1161 struct target_ops *t;
1162
1163 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1164 if (replay_memory_access == replay_memory_access_read_only
aef92902 1165 && !record_btrace_generating_corefile
67b5c0c1 1166 && record_btrace_is_replaying (ops))
633785ff
MM
1167 {
1168 switch (object)
1169 {
1170 case TARGET_OBJECT_MEMORY:
1171 {
1172 struct target_section *section;
1173
1174 /* We do not allow writing memory in general. */
1175 if (writebuf != NULL)
9b409511
YQ
1176 {
1177 *xfered_len = len;
bc113b4e 1178 return TARGET_XFER_UNAVAILABLE;
9b409511 1179 }
633785ff
MM
1180
1181 /* We allow reading readonly memory. */
1182 section = target_section_by_addr (ops, offset);
1183 if (section != NULL)
1184 {
1185 /* Check if the section we found is readonly. */
1186 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1187 section->the_bfd_section)
1188 & SEC_READONLY) != 0)
1189 {
1190 /* Truncate the request to fit into this section. */
1191 len = min (len, section->endaddr - offset);
1192 break;
1193 }
1194 }
1195
9b409511 1196 *xfered_len = len;
bc113b4e 1197 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1198 }
1199 }
1200 }
1201
1202 /* Forward the request. */
e75fdfca
TT
1203 ops = ops->beneath;
1204 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1205 offset, len, xfered_len);
633785ff
MM
1206}
1207
/* The to_insert_breakpoint method of target record-btrace.

   Inserting a breakpoint requires writing target memory, which is normally
   refused while replaying.  Temporarily lift the restriction and restore it
   on both the normal and the exceptional return path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1238
/* The to_remove_breakpoint method of target record-btrace.

   Like record_btrace_insert_breakpoint, removing a breakpoint needs to
   write target memory; temporarily allow it and restore the old access
   mode on both the normal and the exceptional return path.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1269
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register is known: it is taken from the
   current replay position.  Requests for other registers return without
   supplying anything.  When not replaying (or when generating a core
   file), the request is forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      /* Supply the PC of the instruction at the replay position.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1310
1311/* The to_store_registers method of target record-btrace. */
1312
1313static void
1314record_btrace_store_registers (struct target_ops *ops,
1315 struct regcache *regcache, int regno)
1316{
1317 struct target_ops *t;
1318
aef92902 1319 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1320 error (_("This record target does not allow writing registers."));
1321
1322 gdb_assert (may_write_registers != 0);
1323
e75fdfca
TT
1324 t = ops->beneath;
1325 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1326}
1327
1328/* The to_prepare_to_store method of target record-btrace. */
1329
1330static void
1331record_btrace_prepare_to_store (struct target_ops *ops,
1332 struct regcache *regcache)
1333{
1334 struct target_ops *t;
1335
aef92902 1336 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1337 return;
1338
e75fdfca
TT
1339 t = ops->beneath;
1340 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1341}
1342
/* The branch trace frame cache.

   Per-frame data for the btrace unwinders, allocated on the frame
   obstack and registered in the BFCACHE hash table below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1356
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are keyed on their FRAME pointer (see bfcache_hash and
   bfcache_eq below).  */

static htab_t bfcache;
1360
1361/* hash_f for htab_create_alloc of bfcache. */
1362
1363static hashval_t
1364bfcache_hash (const void *arg)
1365{
1366 const struct btrace_frame_cache *cache = arg;
1367
1368 return htab_hash_pointer (cache->frame);
1369}
1370
1371/* eq_f for htab_create_alloc of bfcache. */
1372
1373static int
1374bfcache_eq (const void *arg1, const void *arg2)
1375{
1376 const struct btrace_frame_cache *cache1 = arg1;
1377 const struct btrace_frame_cache *cache2 = arg2;
1378
1379 return cache1->frame == cache2->frame;
1380}
1381
/* Create a new btrace frame cache for FRAME.

   The cache is allocated on the frame obstack and inserted into BFCACHE.
   The frame must not already have an entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1399
1400/* Extract the branch trace function from a branch trace frame. */
1401
1402static const struct btrace_function *
1403btrace_get_frame_function (struct frame_info *frame)
1404{
1405 const struct btrace_frame_cache *cache;
1406 const struct btrace_function *bfun;
1407 struct btrace_frame_cache pattern;
1408 void **slot;
1409
1410 pattern.frame = frame;
1411
1412 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1413 if (slot == NULL)
1414 return NULL;
1415
1416 cache = *slot;
1417 return cache->bfun;
1418}
1419
cecac1ab
MM
1420/* Implement stop_reason method for record_btrace_frame_unwind. */
1421
1422static enum unwind_stop_reason
1423record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1424 void **this_cache)
1425{
0b722aec
MM
1426 const struct btrace_frame_cache *cache;
1427 const struct btrace_function *bfun;
1428
1429 cache = *this_cache;
1430 bfun = cache->bfun;
1431 gdb_assert (bfun != NULL);
1432
1433 if (bfun->up == NULL)
1434 return UNWIND_UNAVAILABLE;
1435
1436 return UNWIND_NO_REASON;
cecac1ab
MM
1437}
1438
/* Implement this_id method for record_btrace_frame_unwind.

   Build an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of the traced function.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Use the first segment so all segments of one function instance share
     the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1467
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound: for a call, it is the address after the
   last instruction of the caller segment; for a return, it is the first
   instruction of the caller segment.  All other registers are reported
   as not available.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller resumes at its first instruction after the return.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The caller resumes after the call instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1516
/* Implement sniffer method for record_btrace_frame_unwind.

   Matches the innermost frame (current replay position) and any frame
   whose callee was recorded with a normal (non-tailcall) link to its
   caller.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up link, unless it is a
	 tail call (handled by the tailcall sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1566
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Matches only frames whose callee was recorded with a tail-call link
   to its caller.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail-call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1604
/* Implement dealloc_cache method for the btrace frame unwinders.
   Remove the frame's entry from BFCACHE; the entry itself lives on the
   frame obstack and is freed with it.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must exist; it was created by a sniffer.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1618
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1635
/* The btrace unwinder for tail-call frames.  Identical to the normal
   btrace unwinder except for the frame type and the sniffer, which only
   matches tail calls.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1646
/* Implement the to_get_unwinder method.
   Return the btrace unwinder for normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1654
/* Implement the to_get_tailcall_unwinder method.
   Return the btrace unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1662
/* Return a human-readable string for FLAG.
   Used in debug output only; unknown values yield "<invalid>".  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}
1688
/* Indicate that TP should be resumed according to FLAG.

   Only records the intent in TP's btrace flags; the actual stepping
   happens in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1709
/* Start replaying a thread.

   Return the new replay iterator positioned at the end of TP's execution
   history (skipping trailing gaps), or NULL if TP has no trace.  Also
   fixes up infrun's stored stepping frame ids, which are computed
   differently once replaying.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      /* Undo the partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1808
1809/* Stop replaying a thread. */
1810
1811static void
1812record_btrace_stop_replaying (struct thread_info *tp)
1813{
1814 struct btrace_thread_info *btinfo;
1815
1816 btinfo = &tp->btrace;
1817
1818 xfree (btinfo->replay);
1819 btinfo->replay = NULL;
1820
1821 /* Make sure we're not leaving any stale registers. */
1822 registers_changed_ptid (tp->ptid);
1823}
1824
/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Nothing to do if TP is not replaying.  */
  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}
1844
/* The to_resume method of target record-btrace.

   When no thread is replaying and we are moving forward, the request is
   forwarded to the target beneath.  Otherwise the resume intent is only
   recorded per thread (as a btrace thread flag); the actual stepping
   happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag;
  ptid_t orig_ptid;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Remember the original request for forwarding below; PTID may be
     rewritten for all-stop targets.  */
  orig_ptid = ptid;

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* For all-stop targets...  */
  if (!target_is_non_stop_p ())
    {
      /* ...we pick the current thread when asked to resume an entire process
	 or everything.  */
      if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
	ptid = inferior_ptid;

      tp = find_thread_ptid (ptid);
      if (tp == NULL)
	error (_("Cannot find thread to resume."));

      /* ...and we stop replaying other threads if the thread to resume is not
	 replaying.  */
      if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
	ALL_NON_EXITED_THREADS (tp)
	  record_btrace_stop_replaying (tp);
    }

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, orig_ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid))
      record_btrace_resume_thread (tp, flag);

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1916
/* Cancel resuming TP.

   Clear any pending move/stop request and stop replaying if TP is at
   the end of its execution history.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
1935
1936/* Return a target_waitstatus indicating that we ran out of history. */
1937
1938static struct target_waitstatus
1939btrace_step_no_history (void)
1940{
1941 struct target_waitstatus status;
1942
1943 status.kind = TARGET_WAITKIND_NO_HISTORY;
1944
1945 return status;
1946}
1947
1948/* Return a target_waitstatus indicating that a step finished. */
1949
1950static struct target_waitstatus
1951btrace_step_stopped (void)
1952{
1953 struct target_waitstatus status;
1954
1955 status.kind = TARGET_WAITKIND_STOPPED;
1956 status.value.sig = GDB_SIGNAL_TRAP;
1957
1958 return status;
1959}
1960
6e4879f0
MM
1961/* Return a target_waitstatus indicating that a thread was stopped as
1962 requested. */
1963
1964static struct target_waitstatus
1965btrace_step_stopped_on_request (void)
1966{
1967 struct target_waitstatus status;
1968
1969 status.kind = TARGET_WAITKIND_STOPPED;
1970 status.value.sig = GDB_SIGNAL_0;
1971
1972 return status;
1973}
1974
d825d248
MM
1975/* Return a target_waitstatus indicating a spurious stop. */
1976
1977static struct target_waitstatus
1978btrace_step_spurious (void)
1979{
1980 struct target_waitstatus status;
1981
1982 status.kind = TARGET_WAITKIND_SPURIOUS;
1983
1984 return status;
1985}
1986
e3cfc1c7
MM
1987/* Return a target_waitstatus indicating that the thread was not resumed. */
1988
1989static struct target_waitstatus
1990btrace_step_no_resumed (void)
1991{
1992 struct target_waitstatus status;
1993
1994 status.kind = TARGET_WAITKIND_NO_RESUMED;
1995
1996 return status;
1997}
1998
1999/* Return a target_waitstatus indicating that we should wait again. */
2000
2001static struct target_waitstatus
2002btrace_step_again (void)
2003{
2004 struct target_waitstatus status;
2005
2006 status.kind = TARGET_WAITKIND_IGNORE;
2007
2008 return status;
2009}
2010
52834460
MM
2011/* Clear the record histories. */
2012
2013static void
2014record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2015{
2016 xfree (btinfo->insn_history);
2017 xfree (btinfo->call_history);
2018
2019 btinfo->insn_history = NULL;
2020 btinfo->call_history = NULL;
2021}
2022
/* Check whether TP's current replay position is at a breakpoint.
   Return non-zero (and update BTINFO's stop reason) if it is; return
   zero if TP is not replaying or no instruction/inferior is found.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
2050
/* Step one instruction in forward direction.

   Return a SPURIOUS status on a successful step, STOPPED when stepping
   onto a breakpoint, and NO_HISTORY when the end of the execution
   history is reached.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2094
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying yet.  Return a SPURIOUS status
   on a successful step, STOPPED when stepping onto a breakpoint, and
   NO_HISTORY when the beginning of the execution history is reached.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2136
/* Step a single thread.

   Consume TP's pending move/stop request and perform one single step in
   the requested direction.  For continue requests, a successful step
   re-arms the request and returns IGNORE so the caller keeps stepping.
   Threads that run out of history keep their request armed; to_wait
   stops the thread for which the event is reported.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep going: re-arm the request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep going: re-arm the request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2205
e3cfc1c7
MM
/* A vector of threads, used by record_btrace_wait to maintain work lists
   of moving threads and of threads that ran out of execution history.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2210
b2f4cfde
MM
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Work lists; both are freed when CLEANUPS run.  */
  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread was asked to move or stop: nothing to wait for.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread is still moving; revisit it on the next round.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Defer reporting "no history"; see the comment above.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* This thread's event ends the wait.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2339
6e4879f0
MM
2340/* The to_stop method of target record-btrace. */
2341
2342static void
2343record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2344{
2345 DEBUG ("stop %s", target_pid_to_str (ptid));
2346
2347 /* As long as we're not replaying, just forward the request. */
2348 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2349 {
2350 ops = ops->beneath;
2351 ops->to_stop (ops, ptid);
2352 }
2353 else
2354 {
2355 struct thread_info *tp;
2356
2357 ALL_NON_EXITED_THREADS (tp)
2358 if (ptid_match (tp->ptid, ptid))
2359 {
2360 tp->btrace.flags &= ~BTHR_MOVE;
2361 tp->btrace.flags |= BTHR_STOP;
2362 }
2363 }
2364 }
2365
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying a recorded branch trace always supports reverse execution.  */
  return 1;
}
2373
9e8915c6 2374/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2375
9e8915c6
PA
2376static int
2377record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2378{
1c63c994 2379 if (record_btrace_is_replaying (ops))
9e8915c6
PA
2380 {
2381 struct thread_info *tp = inferior_thread ();
2382
2383 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2384 }
2385
2386 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2387}
2388
2389/* The to_supports_stopped_by_sw_breakpoint method of target
2390 record-btrace. */
2391
2392static int
2393record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2394{
2395 if (record_btrace_is_replaying (ops))
2396 return 1;
2397
2398 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2399}
2400
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, report the stop reason recorded for the current
	 thread.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2415
2416/* The to_supports_stopped_by_hw_breakpoint method of target
2417 record-btrace. */
2418
2419static int
2420record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2421{
2422 if (record_btrace_is_replaying (ops))
2423 return 1;
52834460 2424
9e8915c6 2425 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2426}
2427
e8032dde 2428/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2429
2430static void
e8032dde 2431record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2432{
e8032dde 2433 /* We don't add or remove threads during replay. */
1c63c994 2434 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2435 return;
2436
2437 /* Forward the request. */
e75fdfca 2438 ops = ops->beneath;
e8032dde 2439 ops->to_update_thread_list (ops);
e2887aa3
MM
2440}
2441
2442/* The to_thread_alive method of target record-btrace. */
2443
2444static int
2445record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2446{
2447 /* We don't add or remove threads during replay. */
1c63c994 2448 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2449 return find_thread_ptid (ptid) != NULL;
2450
2451 /* Forward the request. */
e75fdfca
TT
2452 ops = ops->beneath;
2453 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2454}
2455
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;

      /* The replay position changed; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  /* Update the stop PC and show the new position to the user.  */
  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2486
2487/* The to_goto_record_begin method of target record-btrace. */
2488
2489static void
08475817 2490record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2491{
2492 struct thread_info *tp;
2493 struct btrace_insn_iterator begin;
2494
2495 tp = require_btrace_thread ();
2496
2497 btrace_insn_begin (&begin, &tp->btrace);
2498 record_btrace_set_replay (tp, &begin);
066ce621
MM
2499}
2500
2501/* The to_goto_record_end method of target record-btrace. */
2502
2503static void
307a1b91 2504record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2505{
2506 struct thread_info *tp;
2507
2508 tp = require_btrace_thread ();
2509
2510 record_btrace_set_replay (tp, NULL);
066ce621
MM
2511}
2512
2513/* The to_goto_record method of target record-btrace. */
2514
2515static void
606183ac 2516record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2517{
2518 struct thread_info *tp;
2519 struct btrace_insn_iterator it;
2520 unsigned int number;
2521 int found;
2522
2523 number = insn;
2524
2525 /* Check for wrap-arounds. */
2526 if (number != insn)
2527 error (_("Instruction number out of range."));
2528
2529 tp = require_btrace_thread ();
2530
2531 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2532 if (found == 0)
2533 error (_("No such instruction."));
2534
2535 record_btrace_set_replay (tp, &it);
066ce621
MM
2536}
2537
70ad5bff
MM
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction requested by the last resume.  */
  return record_btrace_resume_exec_dir;
}
2545
aef92902
MM
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* Flag that a core file is being generated.  */
  record_btrace_generating_corefile = 1;
}
2553
/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Clear the flag set by record_btrace_prepare_to_generate_core.  */
  record_btrace_generating_corefile = 0;
}
2561
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target life cycle; detach/disconnect/mourn/kill use the generic
     record implementations.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;

  /* Memory, breakpoints, and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2618
f4abbc16
MM
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection if the target could not be pushed.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2640
/* Start recording Intel(R) Processor Trace.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection if the target could not be pushed.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2662
b20a6524
MM
/* Alias for "target record".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer Intel(R) Processor Trace; fall back to BTS if pushing the
     target in PT format fails.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; reset and report the failure.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2694
67b5c0c1
MM
/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  /* Without a subcommand, list the settings with their current values.  */
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2702
/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  /* List all "show record btrace" settings with their current values.  */
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2710
2711/* The "show record btrace replay-memory-access" command. */
2712
2713static void
2714cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2715 struct cmd_list_element *c, const char *value)
2716{
2717 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2718 replay_memory_access);
2719}
2720
d33501a5
MM
/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  /* This is a pure prefix command; tell the user to supply a subcommand
     and list the available ones.  */
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2731
/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  /* List all BTS settings with their current values.  */
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2739
b20a6524
MM
/* The "set record btrace pt" command.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  /* This is a pure prefix command; tell the user to supply a subcommand
     and list the available ones.  */
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2750
/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  /* List all PT settings with their current values.  */
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2758
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2769
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2780
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") and its per-format variants.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes and settings.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefixes and the BTS buffer size.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" prefixes and the PT buffer size.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for branch-frame unwinder lookups.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.416581 seconds and 4 git commands to generate.