gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
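
/* These messages are gated by the record_debug variable, i.e. GDB's
   "set debug record <level>" setting; e.g. DEBUG ("open") prints
   "[record-btrace] open" to gdb_stdlog when record debugging is enabled.  */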

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
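
/* For example, a buffer size of 64 * 1024 becomes 64 with suffix "kB" and
   2 << 20 becomes 2 with suffix "MB"; a size that is not an exact multiple
   of 1kB is left unchanged and gets an empty suffix, so the configuration
   printers below show it in plain bytes.  */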

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel(R) Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %d (%s).\n"), insns, calls, gaps,
                     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          char prefix[4];

          /* We may add a speculation prefix later.  We use the same space
             that is used for the pc prefix.  */
          if ((flags & DISASSEMBLY_OMIT_PC) == 0)
            strncpy (prefix, pc_prefix (insn->pc), 3);
          else
            {
              prefix[0] = ' ';
              prefix[1] = ' ';
              prefix[2] = ' ';
            }
          prefix[3] = 0;

          /* Print the instruction index.  */
          ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
          ui_out_text (uiout, "\t");

          /* Indicate speculative execution by a leading '?'.  */
          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            prefix[0] = '?';

          /* Print the prefix; we tell gdb_disassembly below to omit it.  */
          ui_out_field_fmt (uiout, "prefix", "%s", prefix);

          /* Disassembly with '/m' flag may not produce the expected result.
             See PR gdb/11833.  */
          gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
                           1, insn->pc, insn->pc + 1);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

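/* Note that the check above considers all non-exited threads: as soon as any
   thread is replaying, record_btrace_xfer_partial below restricts memory
   accesses according to "set record btrace replay-memory-access".  */
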
/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

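/* Implement dealloc_cache method for record_btrace_frame_unwind.  Remove the
   cache entry for this frame from the bfcache hash table.  */
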
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

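/* The two helpers above provide the wait statuses that
   record_btrace_step_thread reports: TARGET_WAITKIND_NO_HISTORY is what makes
   GDB tell the user that it ran out of recorded (reverse-execution) history,
   while the SIGTRAP stop status ends a successful replay step.  */
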
/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

1923/* Step a single thread. */
1924
1925static struct target_waitstatus
1926record_btrace_step_thread (struct thread_info *tp)
1927{
1928 struct btrace_insn_iterator *replay, end;
1929 struct btrace_thread_info *btinfo;
1930 struct address_space *aspace;
1931 struct inferior *inf;
1932 enum btrace_thread_flag flags;
1933 unsigned int steps;
1934
e59fa00f
MM
1935 /* We can't step without an execution history. */
1936 if (btrace_is_empty (tp))
1937 return btrace_step_no_history ();
1938
52834460
MM
1939 btinfo = &tp->btrace;
1940 replay = btinfo->replay;
1941
1942 flags = btinfo->flags & BTHR_MOVE;
1943 btinfo->flags &= ~BTHR_MOVE;
1944
1945 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1946
1947 switch (flags)
1948 {
1949 default:
1950 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1951
1952 case BTHR_STEP:
1953 /* We're done if we're not replaying. */
1954 if (replay == NULL)
1955 return btrace_step_no_history ();
1956
31fd9caa
MM
1957 /* Skip gaps during replay. */
1958 do
1959 {
1960 steps = btrace_insn_next (replay, 1);
1961 if (steps == 0)
1962 {
1963 record_btrace_stop_replaying (tp);
1964 return btrace_step_no_history ();
1965 }
1966 }
1967 while (btrace_insn_get (replay) == NULL);
1968
1969 /* Determine the end of the instruction trace. */
1970 btrace_insn_end (&end, btinfo);
1971
1972 /* We stop replaying if we reached the end of the trace. */
1973 if (btrace_insn_cmp (replay, &end) == 0)
1974 record_btrace_stop_replaying (tp);
1975
1976 return btrace_step_stopped ();
1977
1978 case BTHR_RSTEP:
1979 /* Start replaying if we're not already doing so. */
1980 if (replay == NULL)
1981 replay = record_btrace_start_replaying (tp);
1982
1983 /* If we can't step any further, we reached the end of the history.
1984 Skip gaps during replay. */
1985 do
1986 {
1987 steps = btrace_insn_prev (replay, 1);
1988 if (steps == 0)
1989 return btrace_step_no_history ();
1990
1991 }
1992 while (btrace_insn_get (replay) == NULL);
1993
1994 return btrace_step_stopped ();
1995
1996 case BTHR_CONT:
1997 /* We're done if we're not replaying. */
1998 if (replay == NULL)
1999 return btrace_step_no_history ();
2000
2001 inf = find_inferior_ptid (tp->ptid);
2002 aspace = inf->aspace;
2003
2004 /* Determine the end of the instruction trace. */
2005 btrace_insn_end (&end, btinfo);
2006
2007 for (;;)
2008 {
2009 const struct btrace_insn *insn;
2010
2011 /* Skip gaps during replay. */
2012 do
2013 {
2014 steps = btrace_insn_next (replay, 1);
2015 if (steps == 0)
2016 {
2017 record_btrace_stop_replaying (tp);
2018 return btrace_step_no_history ();
2019 }
2020
2021 insn = btrace_insn_get (replay);
2022 }
2023 while (insn == NULL);
2024
2025 /* We stop replaying if we reached the end of the trace. */
2026 if (btrace_insn_cmp (replay, &end) == 0)
2027 {
2028 record_btrace_stop_replaying (tp);
2029 return btrace_step_no_history ();
2030 }
2031
2032 DEBUG ("stepping %d (%s) ... %s", tp->num,
2033 target_pid_to_str (tp->ptid),
2034 core_addr_to_string_nz (insn->pc));
2035
2036 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2037 &btinfo->stop_reason))
2038 return btrace_step_stopped ();
2039 }
2040
2041 case BTHR_RCONT:
2042 /* Start replaying if we're not already doing so. */
2043 if (replay == NULL)
2044 replay = record_btrace_start_replaying (tp);
2045
2046 inf = find_inferior_ptid (tp->ptid);
2047 aspace = inf->aspace;
2048
2049 for (;;)
2050 {
2051 const struct btrace_insn *insn;
2052
2053 /* If we can't step any further, we reached the end of the history.
2054 Skip gaps during replay. */
2055 do
2056 {
2057 steps = btrace_insn_prev (replay, 1);
2058 if (steps == 0)
2059 return btrace_step_no_history ();
2060
2061 insn = btrace_insn_get (replay);
2062 }
2063 while (insn == NULL);
2064
2065 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
2066 target_pid_to_str (tp->ptid),
2067 core_addr_to_string_nz (insn->pc));
2068
2069 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2070 &btinfo->stop_reason))
2071 return btrace_step_stopped ();
2072 }
2073 }
2074}
2075
2076/* The to_wait method of target record-btrace. */
2077
2078static ptid_t
2079record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2080 struct target_waitstatus *status, int options)
2081{
2082 struct thread_info *tp, *other;
2083
2084 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2085
2086 /* As long as we're not replaying, just forward the request. */
2087 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2088 {
2089 ops = ops->beneath;
2090 return ops->to_wait (ops, ptid, status, options);
2091 }
2092
2093 /* Let's find a thread to move. */
2094 tp = record_btrace_find_thread_to_move (ptid);
2095 if (tp == NULL)
2096 {
2097 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2098
2099 status->kind = TARGET_WAITKIND_IGNORE;
2100 return minus_one_ptid;
2101 }
2102
2103 /* We only move a single thread. We're not able to correlate threads. */
2104 *status = record_btrace_step_thread (tp);
2105
2106 /* Stop all other threads. */
2107 if (!non_stop)
2108 ALL_NON_EXITED_THREADS (other)
2109 other->btrace.flags &= ~BTHR_MOVE;
2110
2111 /* Start record histories anew from the current position. */
2112 record_btrace_clear_histories (&tp->btrace);
2113
2114 /* We moved the replay position but did not update registers. */
2115 registers_changed_ptid (tp->ptid);
2116
2117 return tp->ptid;
2118}
2119
2120/* The to_can_execute_reverse method of target record-btrace. */
2121
2122static int
2123 record_btrace_can_execute_reverse (struct target_ops *self)
2124{
2125 return 1;
2126}
2127
2128 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2129
2130static int
2131record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2132 {
2133 if (record_btrace_is_replaying (ops))
2134 {
2135 struct thread_info *tp = inferior_thread ();
2136
2137 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2138 }
2139
2140 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2141}
2142
2143/* The to_supports_stopped_by_sw_breakpoint method of target
2144 record-btrace. */
2145
2146static int
2147record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2148{
2149 if (record_btrace_is_replaying (ops))
2150 return 1;
2151
2152 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2153}
2154
2155 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2156
2157static int
2158record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2159{
2160 if (record_btrace_is_replaying (ops))
2161 {
2162 struct thread_info *tp = inferior_thread ();
2163
2164 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2165 }
2166
2167 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2168}
2169
2170/* The to_supports_stopped_by_hw_breakpoint method of target
2171 record-btrace. */
2172
2173static int
2174record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2175{
2176 if (record_btrace_is_replaying (ops))
2177 return 1;
2178
2179 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2180}
2181
2182 /* The to_update_thread_list method of target record-btrace. */
2183
2184static void
2185 record_btrace_update_thread_list (struct target_ops *ops)
2186 {
2187 /* We don't add or remove threads during replay. */
2188 if (record_btrace_is_replaying (ops))
2189 return;
2190
2191 /* Forward the request. */
2192 ops = ops->beneath;
2193 ops->to_update_thread_list (ops);
2194}
2195
2196/* The to_thread_alive method of target record-btrace. */
2197
2198static int
2199record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2200{
2201 /* We don't add or remove threads during replay. */
2202 if (record_btrace_is_replaying (ops))
2203 return find_thread_ptid (ptid) != NULL;
2204
2205 /* Forward the request. */
2206 ops = ops->beneath;
2207 return ops->to_thread_alive (ops, ptid);
2208}
2209
2210/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2211 is stopped. */
2212
2213static void
2214record_btrace_set_replay (struct thread_info *tp,
2215 const struct btrace_insn_iterator *it)
2216{
2217 struct btrace_thread_info *btinfo;
2218
2219 btinfo = &tp->btrace;
2220
2221 if (it == NULL || it->function == NULL)
2222 record_btrace_stop_replaying (tp);
2223 else
2224 {
2225 if (btinfo->replay == NULL)
2226 record_btrace_start_replaying (tp);
2227 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2228 return;
2229
2230 *btinfo->replay = *it;
2231 registers_changed_ptid (tp->ptid);
2232 }
2233
2234 /* Start anew from the new replay position. */
2235 record_btrace_clear_histories (btinfo);
2236
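  /* The replay position changed, so the current PC and frame changed with it;
     refresh the cached stop_pc and show the new location.  */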
2237 stop_pc = regcache_read_pc (get_current_regcache ());
2238 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2239}
2240
2241/* The to_goto_record_begin method of target record-btrace. */
2242
2243static void
2244 record_btrace_goto_begin (struct target_ops *self)
2245{
2246 struct thread_info *tp;
2247 struct btrace_insn_iterator begin;
2248
2249 tp = require_btrace_thread ();
2250
2251 btrace_insn_begin (&begin, &tp->btrace);
2252 record_btrace_set_replay (tp, &begin);
2253}
2254
2255/* The to_goto_record_end method of target record-btrace. */
2256
2257static void
2258 record_btrace_goto_end (struct target_ops *ops)
2259{
2260 struct thread_info *tp;
2261
2262 tp = require_btrace_thread ();
2263
2264 record_btrace_set_replay (tp, NULL);
2265}
2266
2267/* The to_goto_record method of target record-btrace. */
2268
2269static void
2270 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2271{
2272 struct thread_info *tp;
2273 struct btrace_insn_iterator it;
2274 unsigned int number;
2275 int found;
2276
2277 number = insn;
2278
2279 /* Check for wrap-arounds. */
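  /* NUMBER is INSN truncated to unsigned int; a mismatch means the requested
     instruction number cannot be represented.  */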
2280 if (number != insn)
2281 error (_("Instruction number out of range."));
2282
2283 tp = require_btrace_thread ();
2284
2285 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2286 if (found == 0)
2287 error (_("No such instruction."));
2288
2289 record_btrace_set_replay (tp, &it);
2290}
2291
2292/* The to_execution_direction target method. */
2293
2294static enum exec_direction_kind
2295record_btrace_execution_direction (struct target_ops *self)
2296{
2297 return record_btrace_resume_exec_dir;
2298}
2299
2300/* The to_prepare_to_generate_core target method. */
2301
2302static void
2303record_btrace_prepare_to_generate_core (struct target_ops *self)
2304{
2305 record_btrace_generating_corefile = 1;
2306}
2307
2308/* The to_done_generating_core target method. */
2309
2310static void
2311record_btrace_done_generating_core (struct target_ops *self)
2312{
2313 record_btrace_generating_corefile = 0;
2314}
2315
2316/* Initialize the record-btrace target ops. */
2317
2318static void
2319init_record_btrace_ops (void)
2320{
2321 struct target_ops *ops;
2322
2323 ops = &record_btrace_ops;
2324 ops->to_shortname = "record-btrace";
2325 ops->to_longname = "Branch tracing target";
2326 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2327 ops->to_open = record_btrace_open;
2328 ops->to_close = record_btrace_close;
2329 ops->to_async = record_btrace_async;
2330 ops->to_detach = record_detach;
2331 ops->to_disconnect = record_disconnect;
2332 ops->to_mourn_inferior = record_mourn_inferior;
2333 ops->to_kill = record_kill;
2334 ops->to_stop_recording = record_btrace_stop_recording;
2335 ops->to_info_record = record_btrace_info;
2336 ops->to_insn_history = record_btrace_insn_history;
2337 ops->to_insn_history_from = record_btrace_insn_history_from;
2338 ops->to_insn_history_range = record_btrace_insn_history_range;
2339 ops->to_call_history = record_btrace_call_history;
2340 ops->to_call_history_from = record_btrace_call_history_from;
2341 ops->to_call_history_range = record_btrace_call_history_range;
2342 ops->to_record_is_replaying = record_btrace_is_replaying;
2343 ops->to_xfer_partial = record_btrace_xfer_partial;
2344 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2345 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2346 ops->to_fetch_registers = record_btrace_fetch_registers;
2347 ops->to_store_registers = record_btrace_store_registers;
2348 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2349 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2350 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2351 ops->to_resume = record_btrace_resume;
2352 ops->to_wait = record_btrace_wait;
2353 ops->to_update_thread_list = record_btrace_update_thread_list;
2354 ops->to_thread_alive = record_btrace_thread_alive;
2355 ops->to_goto_record_begin = record_btrace_goto_begin;
2356 ops->to_goto_record_end = record_btrace_goto_end;
2357 ops->to_goto_record = record_btrace_goto;
2358 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2359 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2360 ops->to_supports_stopped_by_sw_breakpoint
2361 = record_btrace_supports_stopped_by_sw_breakpoint;
2362 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2363 ops->to_supports_stopped_by_hw_breakpoint
2364 = record_btrace_supports_stopped_by_hw_breakpoint;
2365 ops->to_execution_direction = record_btrace_execution_direction;
2366 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2367 ops->to_done_generating_core = record_btrace_done_generating_core;
2368 ops->to_stratum = record_stratum;
2369 ops->to_magic = OPS_MAGIC;
2370}
2371
2372/* Start recording in BTS format. */
2373
2374static void
2375cmd_record_btrace_bts_start (char *args, int from_tty)
2376{
2377 if (args != NULL && *args != 0)
2378 error (_("Invalid argument."));
2379
2380 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2381
2382 TRY
2383 {
2384 execute_command ("target record-btrace", from_tty);
2385 }
2386 CATCH (exception, RETURN_MASK_ALL)
2387 {
2388 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2389 throw_exception (exception);
2390 }
2391 END_CATCH
2392}
2393
2394 /* Start recording Intel(R) Processor Trace. */
2395
2396static void
2397 cmd_record_btrace_pt_start (char *args, int from_tty)
2398{
2399 if (args != NULL && *args != 0)
2400 error (_("Invalid argument."));
2401
2402 record_btrace_conf.format = BTRACE_FORMAT_PT;
2403
2404 TRY
2405 {
2406 execute_command ("target record-btrace", from_tty);
2407 }
2408 CATCH (exception, RETURN_MASK_ALL)
2409 {
2410 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2411 throw_exception (exception);
2412 }
2413 END_CATCH
2414}
2415
2416/* Alias for "target record". */
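  /* Try the Intel(R) Processor Trace format first and fall back to BTS;
     give up only if both formats fail to start.  */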
2417
2418static void
2419cmd_record_btrace_start (char *args, int from_tty)
2420{
2421 if (args != NULL && *args != 0)
2422 error (_("Invalid argument."));
2423
2424 record_btrace_conf.format = BTRACE_FORMAT_PT;
2425
2426 TRY
2427 {
2428 execute_command ("target record-btrace", from_tty);
2429 }
2430 CATCH (exception, RETURN_MASK_ALL)
2431 {
2432 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2433
2434 TRY
2435 {
2436 execute_command ("target record-btrace", from_tty);
2437 }
2438 CATCH (exception, RETURN_MASK_ALL)
2439 {
2440 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2441 throw_exception (exception);
2442 }
2443 END_CATCH
2444 }
2445 END_CATCH
2446}
2447
2448/* The "set record btrace" command. */
2449
2450static void
2451cmd_set_record_btrace (char *args, int from_tty)
2452{
2453 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2454}
2455
2456/* The "show record btrace" command. */
2457
2458static void
2459cmd_show_record_btrace (char *args, int from_tty)
2460{
2461 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2462}
2463
2464/* The "show record btrace replay-memory-access" command. */
2465
2466static void
2467cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2468 struct cmd_list_element *c, const char *value)
2469{
2470 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2471 replay_memory_access);
2472}
2473
2474/* The "set record btrace bts" command. */
2475
2476static void
2477cmd_set_record_btrace_bts (char *args, int from_tty)
2478{
2479 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2480 "by an appropriate subcommand.\n"));
2481 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2482 all_commands, gdb_stdout);
2483}
2484
2485/* The "show record btrace bts" command. */
2486
2487static void
2488cmd_show_record_btrace_bts (char *args, int from_tty)
2489{
2490 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2491}
2492
2493/* The "set record btrace pt" command. */
2494
2495static void
2496cmd_set_record_btrace_pt (char *args, int from_tty)
2497{
2498 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2499 "by an appropriate subcommand.\n"));
2500 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2501 all_commands, gdb_stdout);
2502}
2503
2504/* The "show record btrace pt" command. */
2505
2506static void
2507cmd_show_record_btrace_pt (char *args, int from_tty)
2508{
2509 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2510}
2511
2512/* The "record bts buffer-size" show value function. */
2513
2514static void
2515show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2516 struct cmd_list_element *c,
2517 const char *value)
2518{
2519 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2520 value);
2521}
2522
2523/* The "record pt buffer-size" show value function. */
2524
2525static void
2526show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2527 struct cmd_list_element *c,
2528 const char *value)
2529{
2530 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2531 value);
2532}
2533
2534void _initialize_record_btrace (void);
2535
2536/* Initialize btrace commands. */
2537
2538void
2539_initialize_record_btrace (void)
2540{
2541 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2542 _("Start branch trace recording."), &record_btrace_cmdlist,
2543 "record btrace ", 0, &record_cmdlist);
2544 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2545
2546 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2547 _("\
2548 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2549 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2550 This format may not be available on all processors."),
2551 &record_btrace_cmdlist);
2552 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2553
2554 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2555 _("\
2556 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2557 This format may not be available on all processors."),
2558 &record_btrace_cmdlist);
2559 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2560
2561 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2562 _("Set record options"), &set_record_btrace_cmdlist,
2563 "set record btrace ", 0, &set_record_cmdlist);
2564
2565 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2566 _("Show record options"), &show_record_btrace_cmdlist,
2567 "show record btrace ", 0, &show_record_cmdlist);
2568
2569 add_setshow_enum_cmd ("replay-memory-access", no_class,
2570 replay_memory_access_types, &replay_memory_access, _("\
2571 Set what memory accesses are allowed during replay."), _("\
2572 Show what memory accesses are allowed during replay."),
2573 _("Default is READ-ONLY.\n\n\
2574 The btrace record target does not trace data.\n\
2575 The memory therefore corresponds to the live target and not \
2576 to the current replay position.\n\n\
2577 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2578 When READ-WRITE, allow accesses to read-only and read-write memory during \
2579 replay."),
2580 NULL, cmd_show_replay_memory_access,
2581 &set_record_btrace_cmdlist,
2582 &show_record_btrace_cmdlist);
2583
2584 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2585 _("Set record btrace bts options"),
2586 &set_record_btrace_bts_cmdlist,
2587 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2588
2589 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2590 _("Show record btrace bts options"),
2591 &show_record_btrace_bts_cmdlist,
2592 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2593
2594 add_setshow_uinteger_cmd ("buffer-size", no_class,
2595 &record_btrace_conf.bts.size,
2596 _("Set the record/replay bts buffer size."),
2597 _("Show the record/replay bts buffer size."), _("\
2598 When starting recording request a trace buffer of this size. \
2599 The actual buffer size may differ from the requested size. \
2600 Use \"info record\" to see the actual buffer size.\n\n\
2601 Bigger buffers allow longer recording but also take more time to process \
2602 the recorded execution trace.\n\n\
2603 The trace buffer size may not be changed while recording."), NULL,
2604 show_record_bts_buffer_size_value,
2605 &set_record_btrace_bts_cmdlist,
2606 &show_record_btrace_bts_cmdlist);
2607
2608 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2609 _("Set record btrace pt options"),
2610 &set_record_btrace_pt_cmdlist,
2611 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2612
2613 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2614 _("Show record btrace pt options"),
2615 &show_record_btrace_pt_cmdlist,
2616 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2617
2618 add_setshow_uinteger_cmd ("buffer-size", no_class,
2619 &record_btrace_conf.pt.size,
2620 _("Set the record/replay pt buffer size."),
2621 _("Show the record/replay pt buffer size."), _("\
2622 Bigger buffers allow longer recording but also take more time to process \
2623 the recorded execution.\n\
2624 The actual buffer size may differ from the requested size. Use \"info record\" \
2625 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2626 &set_record_btrace_pt_cmdlist,
2627 &show_record_btrace_pt_cmdlist);
2628
2629 init_record_btrace_ops ();
2630 add_target (&record_btrace_ops);
2631
2632 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2633 xcalloc, xfree);
2634
2635 record_btrace_conf.bts.size = 64 * 1024;
2636 record_btrace_conf.pt.size = 16 * 1024;
2637}