record-btrace: indicate gaps
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
41/* The target_ops of record-btrace. */
42static struct target_ops record_btrace_ops;
43
44/* A new thread observer enabling branch tracing for the new thread. */
45static struct observer *record_btrace_thread_observer;
46
67b5c0c1
MM
47/* Memory access types used in set/show record btrace replay-memory-access. */
48static const char replay_memory_access_read_only[] = "read-only";
49static const char replay_memory_access_read_write[] = "read-write";
50static const char *const replay_memory_access_types[] =
51{
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55};
56
57/* The currently allowed replay memory access type. */
58static const char *replay_memory_access = replay_memory_access_read_only;
59
60/* Command lists for "set/show record btrace". */
61static struct cmd_list_element *set_record_btrace_cmdlist;
62static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 63
70ad5bff
MM
64/* The execution direction of the last resume we got. See record-full.c. */
65static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67/* The async event handler for reverse/replay execution. */
68static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
aef92902
MM
70/* A flag indicating that we are currently generating a core file. */
71static int record_btrace_generating_corefile;
72
f4abbc16
MM
73/* The current branch trace configuration. */
74static struct btrace_config record_btrace_conf;
75
76/* Command list for "record btrace". */
77static struct cmd_list_element *record_btrace_cmdlist;
78
d33501a5
MM
79/* Command lists for "set/show record btrace". */
80static struct cmd_list_element *set_record_btrace_cmdlist;
81static struct cmd_list_element *show_record_btrace_cmdlist;
82
83/* Command lists for "set/show record btrace bts". */
84static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86
afedecd3
MM
87/* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
89
90#define DEBUG(msg, args...) \
91 do \
92 { \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
96 } \
97 while (0)
98
99
100/* Update the branch trace for the current thread and return a pointer to its
066ce621 101 thread_info.
afedecd3
MM
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
066ce621
MM
106static struct thread_info *
107require_btrace_thread (void)
afedecd3
MM
108{
109 struct thread_info *tp;
afedecd3
MM
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
6e07b1d2 119 if (btrace_is_empty (tp))
afedecd3
MM
120 error (_("No trace."));
121
066ce621
MM
122 return tp;
123}
124
125/* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131static struct btrace_thread_info *
132require_btrace (void)
133{
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
afedecd3
MM
139}
140
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback: a failure to enable tracing for
   one thread must not abort tracing for the others, so errors are caught
   and downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  /* Catch only errors; other exception reasons propagate normally.  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp, &record_btrace_conf);

  if (error.message != NULL)
    warning ("%s", error.message);
}
154
/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info, passed as void * for use with make_cleanup.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = arg;

  btrace_disable (tp);
}
166
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Remember the observer so record_btrace_auto_disable can detach it.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
177
178/* Disable automatic tracing of new threads. */
179
180static void
181record_btrace_auto_disable (void)
182{
183 /* The observer may have been detached, already. */
184 if (record_btrace_thread_observer == NULL)
185 return;
186
187 DEBUG ("detach thread observer");
188
189 observer_detach_new_thread (record_btrace_thread_observer);
190 record_btrace_thread_observer = NULL;
191}
192
70ad5bff
MM
/* The record-btrace async event handler function.
   Invoked from the event loop; forwards to the generic inferior event
   handler.  DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
200
afedecd3
MM
/* The to_open method of target record-btrace.

   Enables branch tracing for the threads selected by ARGS (all non-exited
   threads when ARGS is empty) and pushes the record-btrace target.  On
   error, the cleanup chain disables tracing for any threads already
   enabled.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Anchor for per-thread disable cleanups; discarded on success.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        /* If a later btrace_enable throws, undo this one.  */
        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success: keep tracing enabled; drop the disable cleanups.  */
  discard_cleanups (disable_chain);
}
243
244/* The to_stop_recording method of target record-btrace. */
245
246static void
c6cd7c02 247record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
248{
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
034f788c 255 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258}
259
/* The to_close method of target record-btrace.
   Releases the async event handler and tears down tracing state for all
   remaining threads.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
279
b7d2e916
PA
280/* The to_async method of target record-btrace. */
281
282static void
283record_btrace_async (struct target_ops *ops,
284 void (*callback) (enum inferior_event_type event_type,
285 void *context),
286 void *context)
287{
288 if (callback != NULL)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 else
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292
293 ops->beneath->to_async (ops->beneath, callback, context);
294}
295
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GB, 1 MB, or 1 kB (checked in that
   order), *SIZE is scaled down accordingly and the matching suffix is
   returned; otherwise *SIZE is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
    {
      unsigned int shift;
      const char *suffix;
    }
  units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int sz, i;

  sz = *size;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    if ((sz & ((1u << units[i].shift) - 1)) == 0)
      {
        *size = sz >> units[i].shift;
        return units[i].suffix;
      }

  return "";
}
323
324/* Print a BTS configuration. */
325
326static void
327record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
328{
329 const char *suffix;
330 unsigned int size;
331
332 size = conf->size;
333 if (size > 0)
334 {
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
337 }
338}
339
340/* Print a branch tracing configuration. */
341
342static void
343record_btrace_print_conf (const struct btrace_config *conf)
344{
345 printf_unfiltered (_("Recording format: %s.\n"),
346 btrace_format_string (conf->format));
347
348 switch (conf->format)
349 {
350 case BTRACE_FORMAT_NONE:
351 return;
352
353 case BTRACE_FORMAT_BTS:
354 record_btrace_print_bts_conf (&conf->bts);
355 return;
356 }
357
358 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
359}
360
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the recording configuration and a summary of the recorded trace
   (instruction count, function segment count, gap count) for the current
   thread, plus the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Print the recording format and its parameters, if configured.  */
  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call segment is the total segment count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* An insn number of zero at the end means the trace ends in a
	     gap (gaps have no instruction numbers).  Skip gaps at the end
	     by stepping backwards until we reach a numbered instruction
	     or the start of the trace.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
433
31fd9caa
MM
/* Print a decode error.

   ERRCODE is a format-specific decode error code; FORMAT selects how it is
   interpreted.  Unrecognized format/errcode combinations print as
   "unknown".  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;
    }

  /* NOTE(review): is_error is set to 1 and never cleared in this version,
     so the "decode error (N): " prefix is always printed; presumably it
     anticipates formats with non-error gap entries — confirm.  */
  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
478
afedecd3
MM
/* Print an unsigned int.
   ui_out has no dedicated unsigned field printer, so format VAL with
   "%u" via the generic formatted-field function.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
486
/* Disassemble a section of the recorded instruction trace.

   Prints each instruction in the half-open range [BEGIN; END) preceded by
   its trace index.  Gaps in the trace are rendered as decode-error lines
   instead of disassembly.  FLAGS are disassembly flags passed through to
   gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
			   insn->pc + 1);
	}
    }
}
535
/* The to_insn_history method of target record-btrace.

   Prints SIZE instructions around the current browsing position: forward
   from it for positive SIZE, backward for negative SIZE.  The first
   invocation starts at the replay position (if replaying) or the trace
   tail; subsequent invocations continue from where the previous one
   stopped.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
621
622/* The to_insn_history_range method of target record-btrace. */
623
624static void
4e99c6b7
TT
625record_btrace_insn_history_range (struct target_ops *self,
626 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
627{
628 struct btrace_thread_info *btinfo;
23a7fe75
MM
629 struct btrace_insn_history *history;
630 struct btrace_insn_iterator begin, end;
afedecd3
MM
631 struct cleanup *uiout_cleanup;
632 struct ui_out *uiout;
23a7fe75
MM
633 unsigned int low, high;
634 int found;
afedecd3
MM
635
636 uiout = current_uiout;
637 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
638 "insn history");
23a7fe75
MM
639 low = from;
640 high = to;
afedecd3 641
23a7fe75 642 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
643
644 /* Check for wrap-arounds. */
23a7fe75 645 if (low != from || high != to)
afedecd3
MM
646 error (_("Bad range."));
647
0688d04e 648 if (high < low)
afedecd3
MM
649 error (_("Bad range."));
650
23a7fe75 651 btinfo = require_btrace ();
afedecd3 652
23a7fe75
MM
653 found = btrace_find_insn_by_number (&begin, btinfo, low);
654 if (found == 0)
655 error (_("Range out of bounds."));
afedecd3 656
23a7fe75
MM
657 found = btrace_find_insn_by_number (&end, btinfo, high);
658 if (found == 0)
0688d04e
MM
659 {
660 /* Silently truncate the range. */
661 btrace_insn_end (&end, btinfo);
662 }
663 else
664 {
665 /* We want both begin and end to be inclusive. */
666 btrace_insn_next (&end, 1);
667 }
afedecd3 668
31fd9caa 669 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 670 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
671
672 do_cleanups (uiout_cleanup);
673}
674
675/* The to_insn_history_from method of target record-btrace. */
676
677static void
9abc3ff3
TT
678record_btrace_insn_history_from (struct target_ops *self,
679 ULONGEST from, int size, int flags)
afedecd3
MM
680{
681 ULONGEST begin, end, context;
682
683 context = abs (size);
0688d04e
MM
684 if (context == 0)
685 error (_("Bad record instruction-history-size."));
afedecd3
MM
686
687 if (size < 0)
688 {
689 end = from;
690
691 if (from < context)
692 begin = 0;
693 else
0688d04e 694 begin = from - context + 1;
afedecd3
MM
695 }
696 else
697 {
698 begin = from;
0688d04e 699 end = from + context - 1;
afedecd3
MM
700
701 /* Check for wrap-around. */
702 if (end < begin)
703 end = ULONGEST_MAX;
704 }
705
4e99c6b7 706 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
707}
708
709/* Print the instruction number range for a function call history line. */
710
711static void
23a7fe75
MM
712btrace_call_history_insn_range (struct ui_out *uiout,
713 const struct btrace_function *bfun)
afedecd3 714{
7acbe133
MM
715 unsigned int begin, end, size;
716
717 size = VEC_length (btrace_insn_s, bfun->insn);
718 gdb_assert (size > 0);
afedecd3 719
23a7fe75 720 begin = bfun->insn_offset;
7acbe133 721 end = begin + size - 1;
afedecd3 722
23a7fe75 723 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 724 ui_out_text (uiout, ",");
23a7fe75 725 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
726}
727
728/* Print the source line information for a function call history line. */
729
730static void
23a7fe75
MM
731btrace_call_history_src_line (struct ui_out *uiout,
732 const struct btrace_function *bfun)
afedecd3
MM
733{
734 struct symbol *sym;
23a7fe75 735 int begin, end;
afedecd3
MM
736
737 sym = bfun->sym;
738 if (sym == NULL)
739 return;
740
741 ui_out_field_string (uiout, "file",
08be3fe3 742 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 743
23a7fe75
MM
744 begin = bfun->lbegin;
745 end = bfun->lend;
746
747 if (end < begin)
afedecd3
MM
748 return;
749
750 ui_out_text (uiout, ":");
23a7fe75 751 ui_out_field_int (uiout, "min line", begin);
afedecd3 752
23a7fe75 753 if (end == begin)
afedecd3
MM
754 return;
755
8710b709 756 ui_out_text (uiout, ",");
23a7fe75 757 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
758}
759
0b722aec
MM
760/* Get the name of a branch trace function. */
761
762static const char *
763btrace_get_bfun_name (const struct btrace_function *bfun)
764{
765 struct minimal_symbol *msym;
766 struct symbol *sym;
767
768 if (bfun == NULL)
769 return "??";
770
771 msym = bfun->msym;
772 sym = bfun->sym;
773
774 if (sym != NULL)
775 return SYMBOL_PRINT_NAME (sym);
776 else if (msym != NULL)
efd66ac6 777 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
778 else
779 return "??";
780}
781
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per call segment in the half-open range [BEGIN; END):
   the segment index, optional call-depth indentation, the function name,
   and — depending on FLAGS — its instruction range and source lines.
   Gap segments (errcode != 0) print a decode-error line instead.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by call depth; btinfo->level shifts the base so the
	     shallowest recorded level starts at column zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
855
/* The to_call_history method of target record-btrace.

   Prints SIZE call segments around the current browsing position, forward
   for positive SIZE, backward for negative SIZE.  Mirrors
   record_btrace_insn_history but iterates over call segments.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple is annotated "insn history" although this is
     the call history — looks like a copy/paste slip; confirm against MI
     consumers before changing the string.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  /* Build a call iterator from the replay instruction iterator.  */
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
944
945/* The to_call_history_range method of target record-btrace. */
946
947static void
f0d960ea
TT
948record_btrace_call_history_range (struct target_ops *self,
949 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
950{
951 struct btrace_thread_info *btinfo;
23a7fe75
MM
952 struct btrace_call_history *history;
953 struct btrace_call_iterator begin, end;
afedecd3
MM
954 struct cleanup *uiout_cleanup;
955 struct ui_out *uiout;
23a7fe75
MM
956 unsigned int low, high;
957 int found;
afedecd3
MM
958
959 uiout = current_uiout;
960 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
961 "func history");
23a7fe75
MM
962 low = from;
963 high = to;
afedecd3 964
23a7fe75 965 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
966
967 /* Check for wrap-arounds. */
23a7fe75 968 if (low != from || high != to)
afedecd3
MM
969 error (_("Bad range."));
970
0688d04e 971 if (high < low)
afedecd3
MM
972 error (_("Bad range."));
973
23a7fe75 974 btinfo = require_btrace ();
afedecd3 975
23a7fe75
MM
976 found = btrace_find_call_by_number (&begin, btinfo, low);
977 if (found == 0)
978 error (_("Range out of bounds."));
afedecd3 979
23a7fe75
MM
980 found = btrace_find_call_by_number (&end, btinfo, high);
981 if (found == 0)
0688d04e
MM
982 {
983 /* Silently truncate the range. */
984 btrace_call_end (&end, btinfo);
985 }
986 else
987 {
988 /* We want both begin and end to be inclusive. */
989 btrace_call_next (&end, 1);
990 }
afedecd3 991
8710b709 992 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 993 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
994
995 do_cleanups (uiout_cleanup);
996}
997
998/* The to_call_history_from method of target record-btrace. */
999
1000static void
ec0aea04
TT
1001record_btrace_call_history_from (struct target_ops *self,
1002 ULONGEST from, int size, int flags)
afedecd3
MM
1003{
1004 ULONGEST begin, end, context;
1005
1006 context = abs (size);
0688d04e
MM
1007 if (context == 0)
1008 error (_("Bad record function-call-history-size."));
afedecd3
MM
1009
1010 if (size < 0)
1011 {
1012 end = from;
1013
1014 if (from < context)
1015 begin = 0;
1016 else
0688d04e 1017 begin = from - context + 1;
afedecd3
MM
1018 }
1019 else
1020 {
1021 begin = from;
0688d04e 1022 end = from + context - 1;
afedecd3
MM
1023
1024 /* Check for wrap-around. */
1025 if (end < begin)
1026 end = ULONGEST_MAX;
1027 }
1028
f0d960ea 1029 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1030}
1031
07bbe694
MM
1032/* The to_record_is_replaying method of target record-btrace. */
1033
1034static int
1c63c994 1035record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
1036{
1037 struct thread_info *tp;
1038
034f788c 1039 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
1040 if (btrace_is_replaying (tp))
1041 return 1;
1042
1043 return 0;
1044}
1045
633785ff
MM
1046/* The to_xfer_partial method of target record-btrace. */
1047
9b409511 1048static enum target_xfer_status
633785ff
MM
1049record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1050 const char *annex, gdb_byte *readbuf,
1051 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1052 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1053{
1054 struct target_ops *t;
1055
1056 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1057 if (replay_memory_access == replay_memory_access_read_only
aef92902 1058 && !record_btrace_generating_corefile
67b5c0c1 1059 && record_btrace_is_replaying (ops))
633785ff
MM
1060 {
1061 switch (object)
1062 {
1063 case TARGET_OBJECT_MEMORY:
1064 {
1065 struct target_section *section;
1066
1067 /* We do not allow writing memory in general. */
1068 if (writebuf != NULL)
9b409511
YQ
1069 {
1070 *xfered_len = len;
bc113b4e 1071 return TARGET_XFER_UNAVAILABLE;
9b409511 1072 }
633785ff
MM
1073
1074 /* We allow reading readonly memory. */
1075 section = target_section_by_addr (ops, offset);
1076 if (section != NULL)
1077 {
1078 /* Check if the section we found is readonly. */
1079 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1080 section->the_bfd_section)
1081 & SEC_READONLY) != 0)
1082 {
1083 /* Truncate the request to fit into this section. */
1084 len = min (len, section->endaddr - offset);
1085 break;
1086 }
1087 }
1088
9b409511 1089 *xfered_len = len;
bc113b4e 1090 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1091 }
1092 }
1093 }
1094
1095 /* Forward the request. */
e75fdfca
TT
1096 ops = ops->beneath;
1097 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1098 offset, len, xfered_len);
633785ff
MM
1099}
1100
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily switches replay memory access to read-write so the target
   beneath may patch in the breakpoint, restoring the previous setting
   even when the insertion throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
1128
1129/* The to_remove_breakpoint method of target record-btrace. */
1130
1131static int
1132record_btrace_remove_breakpoint (struct target_ops *ops,
1133 struct gdbarch *gdbarch,
1134 struct bp_target_info *bp_tgt)
1135{
1136 volatile struct gdb_exception except;
67b5c0c1
MM
1137 const char *old;
1138 int ret;
633785ff
MM
1139
1140 /* Removing breakpoints requires accessing memory. Allow it for the
1141 duration of this function. */
67b5c0c1
MM
1142 old = replay_memory_access;
1143 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1144
1145 ret = 0;
1146 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 1147 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff 1148
67b5c0c1 1149 replay_memory_access = old;
633785ff
MM
1150
1151 if (except.reason < 0)
1152 throw_exception (except);
1153
1154 return ret;
1155}
1156
1f3ef581
MM
1157/* The to_fetch_registers method of target record-btrace. */
1158
1159static void
1160record_btrace_fetch_registers (struct target_ops *ops,
1161 struct regcache *regcache, int regno)
1162{
1163 struct btrace_insn_iterator *replay;
1164 struct thread_info *tp;
1165
1166 tp = find_thread_ptid (inferior_ptid);
1167 gdb_assert (tp != NULL);
1168
1169 replay = tp->btrace.replay;
aef92902 1170 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1171 {
1172 const struct btrace_insn *insn;
1173 struct gdbarch *gdbarch;
1174 int pcreg;
1175
1176 gdbarch = get_regcache_arch (regcache);
1177 pcreg = gdbarch_pc_regnum (gdbarch);
1178 if (pcreg < 0)
1179 return;
1180
1181 /* We can only provide the PC register. */
1182 if (regno >= 0 && regno != pcreg)
1183 return;
1184
1185 insn = btrace_insn_get (replay);
1186 gdb_assert (insn != NULL);
1187
1188 regcache_raw_supply (regcache, regno, &insn->pc);
1189 }
1190 else
1191 {
e75fdfca 1192 struct target_ops *t = ops->beneath;
1f3ef581 1193
e75fdfca 1194 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1195 }
1196}
1197
1198/* The to_store_registers method of target record-btrace. */
1199
1200static void
1201record_btrace_store_registers (struct target_ops *ops,
1202 struct regcache *regcache, int regno)
1203{
1204 struct target_ops *t;
1205
aef92902 1206 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1207 error (_("This record target does not allow writing registers."));
1208
1209 gdb_assert (may_write_registers != 0);
1210
e75fdfca
TT
1211 t = ops->beneath;
1212 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1213}
1214
1215/* The to_prepare_to_store method of target record-btrace. */
1216
1217static void
1218record_btrace_prepare_to_store (struct target_ops *ops,
1219 struct regcache *regcache)
1220{
1221 struct target_ops *t;
1222
aef92902 1223 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1224 return;
1225
e75fdfca
TT
1226 t = ops->beneath;
1227 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1228}
1229
0b722aec
MM
1230/* The branch trace frame cache. */
1231
1232struct btrace_frame_cache
1233{
1234 /* The thread. */
1235 struct thread_info *tp;
1236
1237 /* The frame info. */
1238 struct frame_info *frame;
1239
1240 /* The branch trace function segment. */
1241 const struct btrace_function *bfun;
1242};
1243
1244/* A struct btrace_frame_cache hash table indexed by NEXT. */
1245
1246static htab_t bfcache;
1247
1248/* hash_f for htab_create_alloc of bfcache. */
1249
1250static hashval_t
1251bfcache_hash (const void *arg)
1252{
1253 const struct btrace_frame_cache *cache = arg;
1254
1255 return htab_hash_pointer (cache->frame);
1256}
1257
1258/* eq_f for htab_create_alloc of bfcache. */
1259
1260static int
1261bfcache_eq (const void *arg1, const void *arg2)
1262{
1263 const struct btrace_frame_cache *cache1 = arg1;
1264 const struct btrace_frame_cache *cache2 = arg2;
1265
1266 return cache1->frame == cache2->frame;
1267}
1268
1269/* Create a new btrace frame cache. */
1270
1271static struct btrace_frame_cache *
1272bfcache_new (struct frame_info *frame)
1273{
1274 struct btrace_frame_cache *cache;
1275 void **slot;
1276
1277 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1278 cache->frame = frame;
1279
1280 slot = htab_find_slot (bfcache, cache, INSERT);
1281 gdb_assert (*slot == NULL);
1282 *slot = cache;
1283
1284 return cache;
1285}
1286
1287/* Extract the branch trace function from a branch trace frame. */
1288
1289static const struct btrace_function *
1290btrace_get_frame_function (struct frame_info *frame)
1291{
1292 const struct btrace_frame_cache *cache;
1293 const struct btrace_function *bfun;
1294 struct btrace_frame_cache pattern;
1295 void **slot;
1296
1297 pattern.frame = frame;
1298
1299 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1300 if (slot == NULL)
1301 return NULL;
1302
1303 cache = *slot;
1304 return cache->bfun;
1305}
1306
cecac1ab
MM
1307/* Implement stop_reason method for record_btrace_frame_unwind. */
1308
1309static enum unwind_stop_reason
1310record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1311 void **this_cache)
1312{
0b722aec
MM
1313 const struct btrace_frame_cache *cache;
1314 const struct btrace_function *bfun;
1315
1316 cache = *this_cache;
1317 bfun = cache->bfun;
1318 gdb_assert (bfun != NULL);
1319
1320 if (bfun->up == NULL)
1321 return UNWIND_UNAVAILABLE;
1322
1323 return UNWIND_NO_REASON;
cecac1ab
MM
1324}
1325
1326/* Implement this_id method for record_btrace_frame_unwind. */
1327
1328static void
1329record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1330 struct frame_id *this_id)
1331{
0b722aec
MM
1332 const struct btrace_frame_cache *cache;
1333 const struct btrace_function *bfun;
1334 CORE_ADDR code, special;
1335
1336 cache = *this_cache;
1337
1338 bfun = cache->bfun;
1339 gdb_assert (bfun != NULL);
1340
1341 while (bfun->segment.prev != NULL)
1342 bfun = bfun->segment.prev;
1343
1344 code = get_frame_func (this_frame);
1345 special = bfun->number;
1346
1347 *this_id = frame_id_build_unavailable_stack_special (code, special);
1348
1349 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1350 btrace_get_bfun_name (cache->bfun),
1351 core_addr_to_string_nz (this_id->code_addr),
1352 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1353}
1354
1355/* Implement prev_register method for record_btrace_frame_unwind. */
1356
1357static struct value *
1358record_btrace_frame_prev_register (struct frame_info *this_frame,
1359 void **this_cache,
1360 int regnum)
1361{
0b722aec
MM
1362 const struct btrace_frame_cache *cache;
1363 const struct btrace_function *bfun, *caller;
1364 const struct btrace_insn *insn;
1365 struct gdbarch *gdbarch;
1366 CORE_ADDR pc;
1367 int pcreg;
1368
1369 gdbarch = get_frame_arch (this_frame);
1370 pcreg = gdbarch_pc_regnum (gdbarch);
1371 if (pcreg < 0 || regnum != pcreg)
1372 throw_error (NOT_AVAILABLE_ERROR,
1373 _("Registers are not available in btrace record history"));
1374
1375 cache = *this_cache;
1376 bfun = cache->bfun;
1377 gdb_assert (bfun != NULL);
1378
1379 caller = bfun->up;
1380 if (caller == NULL)
1381 throw_error (NOT_AVAILABLE_ERROR,
1382 _("No caller in btrace record history"));
1383
1384 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1385 {
1386 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1387 pc = insn->pc;
1388 }
1389 else
1390 {
1391 insn = VEC_last (btrace_insn_s, caller->insn);
1392 pc = insn->pc;
1393
1394 pc += gdb_insn_length (gdbarch, pc);
1395 }
1396
1397 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1398 btrace_get_bfun_name (bfun), bfun->level,
1399 core_addr_to_string_nz (pc));
1400
1401 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1402}
1403
1404/* Implement sniffer method for record_btrace_frame_unwind. */
1405
1406static int
1407record_btrace_frame_sniffer (const struct frame_unwind *self,
1408 struct frame_info *this_frame,
1409 void **this_cache)
1410{
0b722aec
MM
1411 const struct btrace_function *bfun;
1412 struct btrace_frame_cache *cache;
cecac1ab 1413 struct thread_info *tp;
0b722aec 1414 struct frame_info *next;
cecac1ab
MM
1415
1416 /* THIS_FRAME does not contain a reference to its thread. */
1417 tp = find_thread_ptid (inferior_ptid);
1418 gdb_assert (tp != NULL);
1419
0b722aec
MM
1420 bfun = NULL;
1421 next = get_next_frame (this_frame);
1422 if (next == NULL)
1423 {
1424 const struct btrace_insn_iterator *replay;
1425
1426 replay = tp->btrace.replay;
1427 if (replay != NULL)
1428 bfun = replay->function;
1429 }
1430 else
1431 {
1432 const struct btrace_function *callee;
1433
1434 callee = btrace_get_frame_function (next);
1435 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1436 bfun = callee->up;
1437 }
1438
1439 if (bfun == NULL)
1440 return 0;
1441
1442 DEBUG ("[frame] sniffed frame for %s on level %d",
1443 btrace_get_bfun_name (bfun), bfun->level);
1444
1445 /* This is our frame. Initialize the frame cache. */
1446 cache = bfcache_new (this_frame);
1447 cache->tp = tp;
1448 cache->bfun = bfun;
1449
1450 *this_cache = cache;
1451 return 1;
1452}
1453
1454/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1455
1456static int
1457record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1458 struct frame_info *this_frame,
1459 void **this_cache)
1460{
1461 const struct btrace_function *bfun, *callee;
1462 struct btrace_frame_cache *cache;
1463 struct frame_info *next;
1464
1465 next = get_next_frame (this_frame);
1466 if (next == NULL)
1467 return 0;
1468
1469 callee = btrace_get_frame_function (next);
1470 if (callee == NULL)
1471 return 0;
1472
1473 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1474 return 0;
1475
1476 bfun = callee->up;
1477 if (bfun == NULL)
1478 return 0;
1479
1480 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1481 btrace_get_bfun_name (bfun), bfun->level);
1482
1483 /* This is our frame. Initialize the frame cache. */
1484 cache = bfcache_new (this_frame);
1485 cache->tp = find_thread_ptid (inferior_ptid);
1486 cache->bfun = bfun;
1487
1488 *this_cache = cache;
1489 return 1;
1490}
1491
1492static void
1493record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1494{
1495 struct btrace_frame_cache *cache;
1496 void **slot;
1497
1498 cache = this_cache;
1499
1500 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1501 gdb_assert (slot != NULL);
1502
1503 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1504}
1505
1506/* btrace recording does not store previous memory content, neither the stack
1507 frames content. Any unwinding would return errorneous results as the stack
1508 contents no longer matches the changed PC value restored from history.
1509 Therefore this unwinder reports any possibly unwound registers as
1510 <unavailable>. */
1511
0b722aec 1512const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1513{
1514 NORMAL_FRAME,
1515 record_btrace_frame_unwind_stop_reason,
1516 record_btrace_frame_this_id,
1517 record_btrace_frame_prev_register,
1518 NULL,
0b722aec
MM
1519 record_btrace_frame_sniffer,
1520 record_btrace_frame_dealloc_cache
1521};
1522
1523const struct frame_unwind record_btrace_tailcall_frame_unwind =
1524{
1525 TAILCALL_FRAME,
1526 record_btrace_frame_unwind_stop_reason,
1527 record_btrace_frame_this_id,
1528 record_btrace_frame_prev_register,
1529 NULL,
1530 record_btrace_tailcall_frame_sniffer,
1531 record_btrace_frame_dealloc_cache
cecac1ab 1532};
b2f4cfde 1533
ac01945b
TT
1534/* Implement the to_get_unwinder method. */
1535
1536static const struct frame_unwind *
1537record_btrace_to_get_unwinder (struct target_ops *self)
1538{
1539 return &record_btrace_frame_unwind;
1540}
1541
1542/* Implement the to_get_tailcall_unwinder method. */
1543
1544static const struct frame_unwind *
1545record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1546{
1547 return &record_btrace_tailcall_frame_unwind;
1548}
1549
52834460
MM
1550/* Indicate that TP should be resumed according to FLAG. */
1551
1552static void
1553record_btrace_resume_thread (struct thread_info *tp,
1554 enum btrace_thread_flag flag)
1555{
1556 struct btrace_thread_info *btinfo;
1557
1558 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1559
1560 btinfo = &tp->btrace;
1561
1562 if ((btinfo->flags & BTHR_MOVE) != 0)
1563 error (_("Thread already moving."));
1564
1565 /* Fetch the latest branch trace. */
1566 btrace_fetch (tp);
1567
1568 btinfo->flags |= flag;
1569}
1570
1571/* Find the thread to resume given a PTID. */
1572
1573static struct thread_info *
1574record_btrace_find_resume_thread (ptid_t ptid)
1575{
1576 struct thread_info *tp;
1577
1578 /* When asked to resume everything, we pick the current thread. */
1579 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1580 ptid = inferior_ptid;
1581
1582 return find_thread_ptid (ptid);
1583}
1584
1585/* Start replaying a thread. */
1586
1587static struct btrace_insn_iterator *
1588record_btrace_start_replaying (struct thread_info *tp)
1589{
1590 volatile struct gdb_exception except;
1591 struct btrace_insn_iterator *replay;
1592 struct btrace_thread_info *btinfo;
1593 int executing;
1594
1595 btinfo = &tp->btrace;
1596 replay = NULL;
1597
1598 /* We can't start replaying without trace. */
1599 if (btinfo->begin == NULL)
1600 return NULL;
1601
1602 /* Clear the executing flag to allow changes to the current frame.
1603 We are not actually running, yet. We just started a reverse execution
1604 command or a record goto command.
1605 For the latter, EXECUTING is false and this has no effect.
1606 For the former, EXECUTING is true and we're in to_wait, about to
1607 move the thread. Since we need to recompute the stack, we temporarily
1608 set EXECUTING to flase. */
1609 executing = is_executing (tp->ptid);
1610 set_executing (tp->ptid, 0);
1611
1612 /* GDB stores the current frame_id when stepping in order to detects steps
1613 into subroutines.
1614 Since frames are computed differently when we're replaying, we need to
1615 recompute those stored frames and fix them up so we can still detect
1616 subroutines after we started replaying. */
1617 TRY_CATCH (except, RETURN_MASK_ALL)
1618 {
1619 struct frame_info *frame;
1620 struct frame_id frame_id;
1621 int upd_step_frame_id, upd_step_stack_frame_id;
1622
1623 /* The current frame without replaying - computed via normal unwind. */
1624 frame = get_current_frame ();
1625 frame_id = get_frame_id (frame);
1626
1627 /* Check if we need to update any stepping-related frame id's. */
1628 upd_step_frame_id = frame_id_eq (frame_id,
1629 tp->control.step_frame_id);
1630 upd_step_stack_frame_id = frame_id_eq (frame_id,
1631 tp->control.step_stack_frame_id);
1632
1633 /* We start replaying at the end of the branch trace. This corresponds
1634 to the current instruction. */
1635 replay = xmalloc (sizeof (*replay));
1636 btrace_insn_end (replay, btinfo);
1637
31fd9caa
MM
1638 /* Skip gaps at the end of the trace. */
1639 while (btrace_insn_get (replay) == NULL)
1640 {
1641 unsigned int steps;
1642
1643 steps = btrace_insn_prev (replay, 1);
1644 if (steps == 0)
1645 error (_("No trace."));
1646 }
1647
52834460
MM
1648 /* We're not replaying, yet. */
1649 gdb_assert (btinfo->replay == NULL);
1650 btinfo->replay = replay;
1651
1652 /* Make sure we're not using any stale registers. */
1653 registers_changed_ptid (tp->ptid);
1654
1655 /* The current frame with replaying - computed via btrace unwind. */
1656 frame = get_current_frame ();
1657 frame_id = get_frame_id (frame);
1658
1659 /* Replace stepping related frames where necessary. */
1660 if (upd_step_frame_id)
1661 tp->control.step_frame_id = frame_id;
1662 if (upd_step_stack_frame_id)
1663 tp->control.step_stack_frame_id = frame_id;
1664 }
1665
1666 /* Restore the previous execution state. */
1667 set_executing (tp->ptid, executing);
1668
1669 if (except.reason < 0)
1670 {
1671 xfree (btinfo->replay);
1672 btinfo->replay = NULL;
1673
1674 registers_changed_ptid (tp->ptid);
1675
1676 throw_exception (except);
1677 }
1678
1679 return replay;
1680}
1681
1682/* Stop replaying a thread. */
1683
1684static void
1685record_btrace_stop_replaying (struct thread_info *tp)
1686{
1687 struct btrace_thread_info *btinfo;
1688
1689 btinfo = &tp->btrace;
1690
1691 xfree (btinfo->replay);
1692 btinfo->replay = NULL;
1693
1694 /* Make sure we're not leaving any stale registers. */
1695 registers_changed_ptid (tp->ptid);
1696}
1697
b2f4cfde
MM
1698/* The to_resume method of target record-btrace. */
1699
1700static void
1701record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1702 enum gdb_signal signal)
1703{
52834460
MM
1704 struct thread_info *tp, *other;
1705 enum btrace_thread_flag flag;
1706
1707 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1708
70ad5bff
MM
1709 /* Store the execution direction of the last resume. */
1710 record_btrace_resume_exec_dir = execution_direction;
1711
52834460
MM
1712 tp = record_btrace_find_resume_thread (ptid);
1713 if (tp == NULL)
1714 error (_("Cannot find thread to resume."));
1715
1716 /* Stop replaying other threads if the thread to resume is not replaying. */
1717 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
034f788c 1718 ALL_NON_EXITED_THREADS (other)
52834460
MM
1719 record_btrace_stop_replaying (other);
1720
b2f4cfde 1721 /* As long as we're not replaying, just forward the request. */
1c63c994 1722 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde 1723 {
e75fdfca
TT
1724 ops = ops->beneath;
1725 return ops->to_resume (ops, ptid, step, signal);
b2f4cfde
MM
1726 }
1727
52834460
MM
1728 /* Compute the btrace thread flag for the requested move. */
1729 if (step == 0)
1730 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1731 else
1732 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1733
1734 /* At the moment, we only move a single thread. We could also move
1735 all threads in parallel by single-stepping each resumed thread
1736 until the first runs into an event.
1737 When we do that, we would want to continue all other threads.
1738 For now, just resume one thread to not confuse to_wait. */
1739 record_btrace_resume_thread (tp, flag);
1740
1741 /* We just indicate the resume intent here. The actual stepping happens in
1742 record_btrace_wait below. */
70ad5bff
MM
1743
1744 /* Async support. */
1745 if (target_can_async_p ())
1746 {
1747 target_async (inferior_event_handler, 0);
1748 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1749 }
52834460
MM
1750}
1751
1752/* Find a thread to move. */
1753
1754static struct thread_info *
1755record_btrace_find_thread_to_move (ptid_t ptid)
1756{
1757 struct thread_info *tp;
1758
1759 /* First check the parameter thread. */
1760 tp = find_thread_ptid (ptid);
1761 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1762 return tp;
1763
1764 /* Otherwise, find one other thread that has been resumed. */
034f788c 1765 ALL_NON_EXITED_THREADS (tp)
52834460
MM
1766 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1767 return tp;
1768
1769 return NULL;
1770}
1771
1772/* Return a target_waitstatus indicating that we ran out of history. */
1773
1774static struct target_waitstatus
1775btrace_step_no_history (void)
1776{
1777 struct target_waitstatus status;
1778
1779 status.kind = TARGET_WAITKIND_NO_HISTORY;
1780
1781 return status;
1782}
1783
1784/* Return a target_waitstatus indicating that a step finished. */
1785
1786static struct target_waitstatus
1787btrace_step_stopped (void)
1788{
1789 struct target_waitstatus status;
1790
1791 status.kind = TARGET_WAITKIND_STOPPED;
1792 status.value.sig = GDB_SIGNAL_TRAP;
1793
1794 return status;
1795}
1796
1797/* Clear the record histories. */
1798
1799static void
1800record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1801{
1802 xfree (btinfo->insn_history);
1803 xfree (btinfo->call_history);
1804
1805 btinfo->insn_history = NULL;
1806 btinfo->call_history = NULL;
1807}
1808
1809/* Step a single thread. */
1810
1811static struct target_waitstatus
1812record_btrace_step_thread (struct thread_info *tp)
1813{
1814 struct btrace_insn_iterator *replay, end;
1815 struct btrace_thread_info *btinfo;
1816 struct address_space *aspace;
1817 struct inferior *inf;
1818 enum btrace_thread_flag flags;
1819 unsigned int steps;
1820
e59fa00f
MM
1821 /* We can't step without an execution history. */
1822 if (btrace_is_empty (tp))
1823 return btrace_step_no_history ();
1824
52834460
MM
1825 btinfo = &tp->btrace;
1826 replay = btinfo->replay;
1827
1828 flags = btinfo->flags & BTHR_MOVE;
1829 btinfo->flags &= ~BTHR_MOVE;
1830
1831 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1832
1833 switch (flags)
1834 {
1835 default:
1836 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1837
1838 case BTHR_STEP:
1839 /* We're done if we're not replaying. */
1840 if (replay == NULL)
1841 return btrace_step_no_history ();
1842
31fd9caa
MM
1843 /* Skip gaps during replay. */
1844 do
1845 {
1846 steps = btrace_insn_next (replay, 1);
1847 if (steps == 0)
1848 {
1849 record_btrace_stop_replaying (tp);
1850 return btrace_step_no_history ();
1851 }
1852 }
1853 while (btrace_insn_get (replay) == NULL);
52834460
MM
1854
1855 /* Determine the end of the instruction trace. */
1856 btrace_insn_end (&end, btinfo);
1857
1858 /* We stop replaying if we reached the end of the trace. */
1859 if (btrace_insn_cmp (replay, &end) == 0)
1860 record_btrace_stop_replaying (tp);
1861
1862 return btrace_step_stopped ();
1863
1864 case BTHR_RSTEP:
1865 /* Start replaying if we're not already doing so. */
1866 if (replay == NULL)
1867 replay = record_btrace_start_replaying (tp);
1868
31fd9caa
MM
1869 /* If we can't step any further, we reached the end of the history.
1870 Skip gaps during replay. */
1871 do
1872 {
1873 steps = btrace_insn_prev (replay, 1);
1874 if (steps == 0)
1875 return btrace_step_no_history ();
1876
1877 }
1878 while (btrace_insn_get (replay) == NULL);
52834460
MM
1879
1880 return btrace_step_stopped ();
1881
1882 case BTHR_CONT:
1883 /* We're done if we're not replaying. */
1884 if (replay == NULL)
1885 return btrace_step_no_history ();
1886
c9657e70 1887 inf = find_inferior_ptid (tp->ptid);
52834460
MM
1888 aspace = inf->aspace;
1889
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end, btinfo);
1892
1893 for (;;)
1894 {
1895 const struct btrace_insn *insn;
1896
31fd9caa
MM
1897 /* Skip gaps during replay. */
1898 do
1899 {
1900 steps = btrace_insn_next (replay, 1);
1901 if (steps == 0)
1902 {
1903 record_btrace_stop_replaying (tp);
1904 return btrace_step_no_history ();
1905 }
1906
1907 insn = btrace_insn_get (replay);
1908 }
1909 while (insn == NULL);
52834460
MM
1910
1911 /* We stop replaying if we reached the end of the trace. */
1912 if (btrace_insn_cmp (replay, &end) == 0)
1913 {
1914 record_btrace_stop_replaying (tp);
1915 return btrace_step_no_history ();
1916 }
1917
52834460
MM
1918 DEBUG ("stepping %d (%s) ... %s", tp->num,
1919 target_pid_to_str (tp->ptid),
1920 core_addr_to_string_nz (insn->pc));
1921
1922 if (breakpoint_here_p (aspace, insn->pc))
1923 return btrace_step_stopped ();
1924 }
1925
1926 case BTHR_RCONT:
1927 /* Start replaying if we're not already doing so. */
1928 if (replay == NULL)
1929 replay = record_btrace_start_replaying (tp);
1930
c9657e70 1931 inf = find_inferior_ptid (tp->ptid);
52834460
MM
1932 aspace = inf->aspace;
1933
1934 for (;;)
1935 {
1936 const struct btrace_insn *insn;
1937
31fd9caa
MM
1938 /* If we can't step any further, we reached the end of the history.
1939 Skip gaps during replay. */
1940 do
1941 {
1942 steps = btrace_insn_prev (replay, 1);
1943 if (steps == 0)
1944 return btrace_step_no_history ();
52834460 1945
31fd9caa
MM
1946 insn = btrace_insn_get (replay);
1947 }
1948 while (insn == NULL);
52834460
MM
1949
1950 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1951 target_pid_to_str (tp->ptid),
1952 core_addr_to_string_nz (insn->pc));
1953
1954 if (breakpoint_here_p (aspace, insn->pc))
1955 return btrace_step_stopped ();
1956 }
1957 }
b2f4cfde
MM
1958}
1959
1960/* The to_wait method of target record-btrace. */
1961
1962static ptid_t
1963record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1964 struct target_waitstatus *status, int options)
1965{
52834460
MM
1966 struct thread_info *tp, *other;
1967
1968 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1969
b2f4cfde 1970 /* As long as we're not replaying, just forward the request. */
1c63c994 1971 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde 1972 {
e75fdfca
TT
1973 ops = ops->beneath;
1974 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
1975 }
1976
52834460
MM
1977 /* Let's find a thread to move. */
1978 tp = record_btrace_find_thread_to_move (ptid);
1979 if (tp == NULL)
1980 {
1981 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1982
1983 status->kind = TARGET_WAITKIND_IGNORE;
1984 return minus_one_ptid;
1985 }
1986
1987 /* We only move a single thread. We're not able to correlate threads. */
1988 *status = record_btrace_step_thread (tp);
1989
1990 /* Stop all other threads. */
1991 if (!non_stop)
034f788c 1992 ALL_NON_EXITED_THREADS (other)
52834460
MM
1993 other->btrace.flags &= ~BTHR_MOVE;
1994
1995 /* Start record histories anew from the current position. */
1996 record_btrace_clear_histories (&tp->btrace);
1997
1998 /* We moved the replay position but did not update registers. */
1999 registers_changed_ptid (tp->ptid);
2000
2001 return tp->ptid;
2002}
2003
/* The to_can_execute_reverse method of target record-btrace.  Reverse
   execution is always possible within the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2011
2012/* The to_decr_pc_after_break method of target record-btrace. */
2013
2014static CORE_ADDR
2015record_btrace_decr_pc_after_break (struct target_ops *ops,
2016 struct gdbarch *gdbarch)
2017{
2018 /* When replaying, we do not actually execute the breakpoint instruction
2019 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 2020 if (record_btrace_is_replaying (ops))
52834460
MM
2021 return 0;
2022
c0eca49f 2023 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
2024}
2025
e8032dde 2026/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2027
2028static void
e8032dde 2029record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2030{
e8032dde 2031 /* We don't add or remove threads during replay. */
1c63c994 2032 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2033 return;
2034
2035 /* Forward the request. */
e75fdfca 2036 ops = ops->beneath;
e8032dde 2037 ops->to_update_thread_list (ops);
e2887aa3
MM
2038}
2039
2040/* The to_thread_alive method of target record-btrace. */
2041
2042static int
2043record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2044{
2045 /* We don't add or remove threads during replay. */
1c63c994 2046 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2047 return find_thread_ptid (ptid) != NULL;
2048
2049 /* Forward the request. */
e75fdfca
TT
2050 ops = ops->beneath;
2051 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2052}
2053
066ce621
MM
2054/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2055 is stopped. */
2056
2057static void
2058record_btrace_set_replay (struct thread_info *tp,
2059 const struct btrace_insn_iterator *it)
2060{
2061 struct btrace_thread_info *btinfo;
2062
2063 btinfo = &tp->btrace;
2064
2065 if (it == NULL || it->function == NULL)
52834460 2066 record_btrace_stop_replaying (tp);
066ce621
MM
2067 else
2068 {
2069 if (btinfo->replay == NULL)
52834460 2070 record_btrace_start_replaying (tp);
066ce621
MM
2071 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2072 return;
2073
2074 *btinfo->replay = *it;
52834460 2075 registers_changed_ptid (tp->ptid);
066ce621
MM
2076 }
2077
52834460
MM
2078 /* Start anew from the new replay position. */
2079 record_btrace_clear_histories (btinfo);
066ce621
MM
2080}
2081
2082/* The to_goto_record_begin method of target record-btrace. */
2083
2084static void
08475817 2085record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2086{
2087 struct thread_info *tp;
2088 struct btrace_insn_iterator begin;
2089
2090 tp = require_btrace_thread ();
2091
2092 btrace_insn_begin (&begin, &tp->btrace);
2093 record_btrace_set_replay (tp, &begin);
2094
2095 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2096}
2097
2098/* The to_goto_record_end method of target record-btrace. */
2099
2100static void
307a1b91 2101record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2102{
2103 struct thread_info *tp;
2104
2105 tp = require_btrace_thread ();
2106
2107 record_btrace_set_replay (tp, NULL);
2108
2109 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2110}
2111
2112/* The to_goto_record method of target record-btrace. */
2113
2114static void
606183ac 2115record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2116{
2117 struct thread_info *tp;
2118 struct btrace_insn_iterator it;
2119 unsigned int number;
2120 int found;
2121
2122 number = insn;
2123
2124 /* Check for wrap-arounds. */
2125 if (number != insn)
2126 error (_("Instruction number out of range."));
2127
2128 tp = require_btrace_thread ();
2129
2130 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2131 if (found == 0)
2132 error (_("No such instruction."));
2133
2134 record_btrace_set_replay (tp, &it);
2135
2136 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2137}
2138
70ad5bff
MM
2139/* The to_execution_direction target method. */
2140
2141static enum exec_direction_kind
2142record_btrace_execution_direction (struct target_ops *self)
2143{
2144 return record_btrace_resume_exec_dir;
2145}
2146
aef92902
MM
2147/* The to_prepare_to_generate_core target method. */
2148
2149static void
2150record_btrace_prepare_to_generate_core (struct target_ops *self)
2151{
2152 record_btrace_generating_corefile = 1;
2153}
2154
2155/* The to_done_generating_core target method. */
2156
2157static void
2158record_btrace_done_generating_core (struct target_ops *self)
2159{
2160 record_btrace_generating_corefile = 0;
2161}
2162
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification and open/close of the target.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;

  /* Generic record target teardown (shared with other record targets).  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;

  /* Memory and breakpoint access (restricted while replaying).  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;

  /* Register access.  */
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;

  /* Frame unwinding for the replay position.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;

  /* Navigation within the recorded execution.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;

  /* Reverse execution support.  */
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core-file generation hooks.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2213
f4abbc16
MM
2214/* Start recording in BTS format. */
2215
2216static void
2217cmd_record_btrace_bts_start (char *args, int from_tty)
2218{
2219 volatile struct gdb_exception exception;
2220
2221 if (args != NULL && *args != 0)
2222 error (_("Invalid argument."));
2223
2224 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2225
2226 TRY_CATCH (exception, RETURN_MASK_ALL)
2227 execute_command ("target record-btrace", from_tty);
2228
2229 if (exception.error != 0)
2230 {
2231 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2232 throw_exception (exception);
2233 }
2234}
2235
afedecd3
MM
2236/* Alias for "target record". */
2237
2238static void
2239cmd_record_btrace_start (char *args, int from_tty)
2240{
f4abbc16
MM
2241 volatile struct gdb_exception exception;
2242
afedecd3
MM
2243 if (args != NULL && *args != 0)
2244 error (_("Invalid argument."));
2245
f4abbc16
MM
2246 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2247
2248 TRY_CATCH (exception, RETURN_MASK_ALL)
2249 execute_command ("target record-btrace", from_tty);
2250
2251 if (exception.error == 0)
2252 return;
2253
2254 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2255 throw_exception (exception);
afedecd3
MM
2256}
2257
67b5c0c1
MM
2258/* The "set record btrace" command. */
2259
2260static void
2261cmd_set_record_btrace (char *args, int from_tty)
2262{
2263 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2264}
2265
2266/* The "show record btrace" command. */
2267
2268static void
2269cmd_show_record_btrace (char *args, int from_tty)
2270{
2271 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2272}
2273
2274/* The "show record btrace replay-memory-access" command. */
2275
2276static void
2277cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2278 struct cmd_list_element *c, const char *value)
2279{
2280 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2281 replay_memory_access);
2282}
2283
d33501a5
MM
2284/* The "set record btrace bts" command. */
2285
2286static void
2287cmd_set_record_btrace_bts (char *args, int from_tty)
2288{
2289 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2290 "by an apporpriate subcommand.\n"));
2291 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2292 all_commands, gdb_stdout);
2293}
2294
2295/* The "show record btrace bts" command. */
2296
2297static void
2298cmd_show_record_btrace_bts (char *args, int from_tty)
2299{
2300 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2301}
2302
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registration order matters: each prefix command ("record btrace",
   "set record btrace", ...) must be added before the subcommands that
   hang off its command list.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; "record b" is an alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" selects the BTS format explicitly.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* Whether memory may be read (or also written) while replaying.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" option prefixes.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  /* The requested BTS trace buffer size (a kernel hint, not a guarantee).  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL, NULL,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace function call segment frame info.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default BTS buffer size: 64 KiB.  */
  record_btrace_conf.bts.size = 64 * 1024;
}
This page took 0.334688 seconds and 4 git commands to generate.