Add new infrun.h header.
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
afedecd3
MM
39
40/* The target_ops of record-btrace. */
41static struct target_ops record_btrace_ops;
42
43/* A new thread observer enabling branch tracing for the new thread. */
44static struct observer *record_btrace_thread_observer;
45
633785ff
MM
46/* Temporarily allow memory accesses. */
47static int record_btrace_allow_memory_access;
48
afedecd3
MM
49/* Print a record-btrace debug message. Use do ... while (0) to avoid
50 ambiguities when used in if statements. */
51
52#define DEBUG(msg, args...) \
53 do \
54 { \
55 if (record_debug != 0) \
56 fprintf_unfiltered (gdb_stdlog, \
57 "[record-btrace] " msg "\n", ##args); \
58 } \
59 while (0)
60
61
62/* Update the branch trace for the current thread and return a pointer to its
066ce621 63 thread_info.
afedecd3
MM
64
65 Throws an error if there is no thread or no trace. This function never
66 returns NULL. */
67
066ce621
MM
68static struct thread_info *
69require_btrace_thread (void)
afedecd3
MM
70{
71 struct thread_info *tp;
afedecd3
MM
72
73 DEBUG ("require");
74
75 tp = find_thread_ptid (inferior_ptid);
76 if (tp == NULL)
77 error (_("No thread."));
78
79 btrace_fetch (tp);
80
6e07b1d2 81 if (btrace_is_empty (tp))
afedecd3
MM
82 error (_("No trace."));
83
066ce621
MM
84 return tp;
85}
86
87/* Update the branch trace for the current thread and return a pointer to its
88 branch trace information struct.
89
90 Throws an error if there is no thread or no trace. This function never
91 returns NULL. */
92
93static struct btrace_thread_info *
94require_btrace (void)
95{
96 struct thread_info *tp;
97
98 tp = require_btrace_thread ();
99
100 return &tp->btrace;
afedecd3
MM
101}
102
103/* Enable branch tracing for one thread. Warn on errors. */
104
105static void
106record_btrace_enable_warn (struct thread_info *tp)
107{
108 volatile struct gdb_exception error;
109
110 TRY_CATCH (error, RETURN_MASK_ERROR)
111 btrace_enable (tp);
112
113 if (error.message != NULL)
114 warning ("%s", error.message);
115}
116
/* Callback function to disable branch tracing for one thread.
   ARG is the struct thread_info to operate on (cleanup signature).  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
128
129/* Enable automatic tracing of new threads. */
130
131static void
132record_btrace_auto_enable (void)
133{
134 DEBUG ("attach thread observer");
135
136 record_btrace_thread_observer
137 = observer_attach_new_thread (record_btrace_enable_warn);
138}
139
140/* Disable automatic tracing of new threads. */
141
142static void
143record_btrace_auto_disable (void)
144{
145 /* The observer may have been detached, already. */
146 if (record_btrace_thread_observer == NULL)
147 return;
148
149 DEBUG ("detach thread observer");
150
151 observer_detach_new_thread (record_btrace_thread_observer);
152 record_btrace_thread_observer = NULL;
153}
154
155/* The to_open method of target record-btrace. */
156
157static void
158record_btrace_open (char *args, int from_tty)
159{
160 struct cleanup *disable_chain;
161 struct thread_info *tp;
162
163 DEBUG ("open");
164
8213266a 165 record_preopen ();
afedecd3
MM
166
167 if (!target_has_execution)
168 error (_("The program is not being run."));
169
170 if (!target_supports_btrace ())
171 error (_("Target does not support branch tracing."));
172
52834460
MM
173 if (non_stop)
174 error (_("Record btrace can't debug inferior in non-stop mode."));
175
afedecd3
MM
176 gdb_assert (record_btrace_thread_observer == NULL);
177
178 disable_chain = make_cleanup (null_cleanup, NULL);
179 ALL_THREADS (tp)
180 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
181 {
182 btrace_enable (tp);
183
184 make_cleanup (record_btrace_disable_callback, tp);
185 }
186
187 record_btrace_auto_enable ();
188
189 push_target (&record_btrace_ops);
190
191 observer_notify_record_changed (current_inferior (), 1);
192
193 discard_cleanups (disable_chain);
194}
195
196/* The to_stop_recording method of target record-btrace. */
197
198static void
c6cd7c02 199record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
200{
201 struct thread_info *tp;
202
203 DEBUG ("stop recording");
204
205 record_btrace_auto_disable ();
206
207 ALL_THREADS (tp)
208 if (tp->btrace.target != NULL)
209 btrace_disable (tp);
210}
211
212/* The to_close method of target record-btrace. */
213
214static void
de90e03d 215record_btrace_close (struct target_ops *self)
afedecd3 216{
568e808b
MM
217 struct thread_info *tp;
218
99c819ee
MM
219 /* Make sure automatic recording gets disabled even if we did not stop
220 recording before closing the record-btrace target. */
221 record_btrace_auto_disable ();
222
568e808b
MM
223 /* We should have already stopped recording.
224 Tear down btrace in case we have not. */
225 ALL_THREADS (tp)
226 btrace_teardown (tp);
afedecd3
MM
227}
228
229/* The to_info_record method of target record-btrace. */
230
231static void
630d6a4a 232record_btrace_info (struct target_ops *self)
afedecd3
MM
233{
234 struct btrace_thread_info *btinfo;
235 struct thread_info *tp;
23a7fe75 236 unsigned int insns, calls;
afedecd3
MM
237
238 DEBUG ("info");
239
240 tp = find_thread_ptid (inferior_ptid);
241 if (tp == NULL)
242 error (_("No thread."));
243
244 btrace_fetch (tp);
245
23a7fe75
MM
246 insns = 0;
247 calls = 0;
248
afedecd3 249 btinfo = &tp->btrace;
6e07b1d2
MM
250
251 if (!btrace_is_empty (tp))
23a7fe75
MM
252 {
253 struct btrace_call_iterator call;
254 struct btrace_insn_iterator insn;
255
256 btrace_call_end (&call, btinfo);
257 btrace_call_prev (&call, 1);
5de9129b 258 calls = btrace_call_number (&call);
23a7fe75
MM
259
260 btrace_insn_end (&insn, btinfo);
261 btrace_insn_prev (&insn, 1);
5de9129b 262 insns = btrace_insn_number (&insn);
23a7fe75 263 }
afedecd3
MM
264
265 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
23a7fe75 266 "%d (%s).\n"), insns, calls, tp->num,
afedecd3 267 target_pid_to_str (tp->ptid));
07bbe694
MM
268
269 if (btrace_is_replaying (tp))
270 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
271 btrace_insn_number (btinfo->replay));
afedecd3
MM
272}
273
/* Print an unsigned int to UIOUT under field name FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
281
282/* Disassemble a section of the recorded instruction trace. */
283
284static void
23a7fe75
MM
285btrace_insn_history (struct ui_out *uiout,
286 const struct btrace_insn_iterator *begin,
287 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
288{
289 struct gdbarch *gdbarch;
23a7fe75 290 struct btrace_insn_iterator it;
afedecd3 291
23a7fe75
MM
292 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
293 btrace_insn_number (end));
afedecd3
MM
294
295 gdbarch = target_gdbarch ();
296
23a7fe75 297 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 298 {
23a7fe75
MM
299 const struct btrace_insn *insn;
300
301 insn = btrace_insn_get (&it);
302
afedecd3 303 /* Print the instruction index. */
23a7fe75 304 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
afedecd3
MM
305 ui_out_text (uiout, "\t");
306
307 /* Disassembly with '/m' flag may not produce the expected result.
308 See PR gdb/11833. */
23a7fe75 309 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
afedecd3
MM
310 }
311}
312
313/* The to_insn_history method of target record-btrace. */
314
315static void
7a6c5609 316record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
317{
318 struct btrace_thread_info *btinfo;
23a7fe75
MM
319 struct btrace_insn_history *history;
320 struct btrace_insn_iterator begin, end;
afedecd3
MM
321 struct cleanup *uiout_cleanup;
322 struct ui_out *uiout;
23a7fe75 323 unsigned int context, covered;
afedecd3
MM
324
325 uiout = current_uiout;
326 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
327 "insn history");
afedecd3 328 context = abs (size);
afedecd3
MM
329 if (context == 0)
330 error (_("Bad record instruction-history-size."));
331
23a7fe75
MM
332 btinfo = require_btrace ();
333 history = btinfo->insn_history;
334 if (history == NULL)
afedecd3 335 {
07bbe694 336 struct btrace_insn_iterator *replay;
afedecd3 337
23a7fe75 338 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 339
07bbe694
MM
340 /* If we're replaying, we start at the replay position. Otherwise, we
341 start at the tail of the trace. */
342 replay = btinfo->replay;
343 if (replay != NULL)
344 begin = *replay;
345 else
346 btrace_insn_end (&begin, btinfo);
347
348 /* We start from here and expand in the requested direction. Then we
349 expand in the other direction, as well, to fill up any remaining
350 context. */
351 end = begin;
352 if (size < 0)
353 {
354 /* We want the current position covered, as well. */
355 covered = btrace_insn_next (&end, 1);
356 covered += btrace_insn_prev (&begin, context - covered);
357 covered += btrace_insn_next (&end, context - covered);
358 }
359 else
360 {
361 covered = btrace_insn_next (&end, context);
362 covered += btrace_insn_prev (&begin, context - covered);
363 }
afedecd3
MM
364 }
365 else
366 {
23a7fe75
MM
367 begin = history->begin;
368 end = history->end;
afedecd3 369
23a7fe75
MM
370 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
371 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 372
23a7fe75
MM
373 if (size < 0)
374 {
375 end = begin;
376 covered = btrace_insn_prev (&begin, context);
377 }
378 else
379 {
380 begin = end;
381 covered = btrace_insn_next (&end, context);
382 }
afedecd3
MM
383 }
384
23a7fe75
MM
385 if (covered > 0)
386 btrace_insn_history (uiout, &begin, &end, flags);
387 else
388 {
389 if (size < 0)
390 printf_unfiltered (_("At the start of the branch trace record.\n"));
391 else
392 printf_unfiltered (_("At the end of the branch trace record.\n"));
393 }
afedecd3 394
23a7fe75 395 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
396 do_cleanups (uiout_cleanup);
397}
398
399/* The to_insn_history_range method of target record-btrace. */
400
401static void
4e99c6b7
TT
402record_btrace_insn_history_range (struct target_ops *self,
403 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
404{
405 struct btrace_thread_info *btinfo;
23a7fe75
MM
406 struct btrace_insn_history *history;
407 struct btrace_insn_iterator begin, end;
afedecd3
MM
408 struct cleanup *uiout_cleanup;
409 struct ui_out *uiout;
23a7fe75
MM
410 unsigned int low, high;
411 int found;
afedecd3
MM
412
413 uiout = current_uiout;
414 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
415 "insn history");
23a7fe75
MM
416 low = from;
417 high = to;
afedecd3 418
23a7fe75 419 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
420
421 /* Check for wrap-arounds. */
23a7fe75 422 if (low != from || high != to)
afedecd3
MM
423 error (_("Bad range."));
424
0688d04e 425 if (high < low)
afedecd3
MM
426 error (_("Bad range."));
427
23a7fe75 428 btinfo = require_btrace ();
afedecd3 429
23a7fe75
MM
430 found = btrace_find_insn_by_number (&begin, btinfo, low);
431 if (found == 0)
432 error (_("Range out of bounds."));
afedecd3 433
23a7fe75
MM
434 found = btrace_find_insn_by_number (&end, btinfo, high);
435 if (found == 0)
0688d04e
MM
436 {
437 /* Silently truncate the range. */
438 btrace_insn_end (&end, btinfo);
439 }
440 else
441 {
442 /* We want both begin and end to be inclusive. */
443 btrace_insn_next (&end, 1);
444 }
afedecd3 445
23a7fe75
MM
446 btrace_insn_history (uiout, &begin, &end, flags);
447 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
448
449 do_cleanups (uiout_cleanup);
450}
451
452/* The to_insn_history_from method of target record-btrace. */
453
454static void
9abc3ff3
TT
455record_btrace_insn_history_from (struct target_ops *self,
456 ULONGEST from, int size, int flags)
afedecd3
MM
457{
458 ULONGEST begin, end, context;
459
460 context = abs (size);
0688d04e
MM
461 if (context == 0)
462 error (_("Bad record instruction-history-size."));
afedecd3
MM
463
464 if (size < 0)
465 {
466 end = from;
467
468 if (from < context)
469 begin = 0;
470 else
0688d04e 471 begin = from - context + 1;
afedecd3
MM
472 }
473 else
474 {
475 begin = from;
0688d04e 476 end = from + context - 1;
afedecd3
MM
477
478 /* Check for wrap-around. */
479 if (end < begin)
480 end = ULONGEST_MAX;
481 }
482
4e99c6b7 483 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
484}
485
486/* Print the instruction number range for a function call history line. */
487
488static void
23a7fe75
MM
489btrace_call_history_insn_range (struct ui_out *uiout,
490 const struct btrace_function *bfun)
afedecd3 491{
7acbe133
MM
492 unsigned int begin, end, size;
493
494 size = VEC_length (btrace_insn_s, bfun->insn);
495 gdb_assert (size > 0);
afedecd3 496
23a7fe75 497 begin = bfun->insn_offset;
7acbe133 498 end = begin + size - 1;
afedecd3 499
23a7fe75 500 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 501 ui_out_text (uiout, ",");
23a7fe75 502 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
503}
504
505/* Print the source line information for a function call history line. */
506
507static void
23a7fe75
MM
508btrace_call_history_src_line (struct ui_out *uiout,
509 const struct btrace_function *bfun)
afedecd3
MM
510{
511 struct symbol *sym;
23a7fe75 512 int begin, end;
afedecd3
MM
513
514 sym = bfun->sym;
515 if (sym == NULL)
516 return;
517
518 ui_out_field_string (uiout, "file",
519 symtab_to_filename_for_display (sym->symtab));
520
23a7fe75
MM
521 begin = bfun->lbegin;
522 end = bfun->lend;
523
524 if (end < begin)
afedecd3
MM
525 return;
526
527 ui_out_text (uiout, ":");
23a7fe75 528 ui_out_field_int (uiout, "min line", begin);
afedecd3 529
23a7fe75 530 if (end == begin)
afedecd3
MM
531 return;
532
8710b709 533 ui_out_text (uiout, ",");
23a7fe75 534 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
535}
536
0b722aec
MM
537/* Get the name of a branch trace function. */
538
539static const char *
540btrace_get_bfun_name (const struct btrace_function *bfun)
541{
542 struct minimal_symbol *msym;
543 struct symbol *sym;
544
545 if (bfun == NULL)
546 return "??";
547
548 msym = bfun->msym;
549 sym = bfun->sym;
550
551 if (sym != NULL)
552 return SYMBOL_PRINT_NAME (sym);
553 else if (msym != NULL)
efd66ac6 554 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
555 else
556 return "??";
557}
558
afedecd3
MM
559/* Disassemble a section of the recorded function trace. */
560
561static void
23a7fe75 562btrace_call_history (struct ui_out *uiout,
8710b709 563 const struct btrace_thread_info *btinfo,
23a7fe75
MM
564 const struct btrace_call_iterator *begin,
565 const struct btrace_call_iterator *end,
afedecd3
MM
566 enum record_print_flag flags)
567{
23a7fe75 568 struct btrace_call_iterator it;
afedecd3 569
23a7fe75
MM
570 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
571 btrace_call_number (end));
afedecd3 572
23a7fe75 573 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 574 {
23a7fe75
MM
575 const struct btrace_function *bfun;
576 struct minimal_symbol *msym;
577 struct symbol *sym;
578
579 bfun = btrace_call_get (&it);
23a7fe75 580 sym = bfun->sym;
0b722aec 581 msym = bfun->msym;
23a7fe75 582
afedecd3 583 /* Print the function index. */
23a7fe75 584 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
585 ui_out_text (uiout, "\t");
586
8710b709
MM
587 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
588 {
589 int level = bfun->level + btinfo->level, i;
590
591 for (i = 0; i < level; ++i)
592 ui_out_text (uiout, " ");
593 }
594
595 if (sym != NULL)
596 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
597 else if (msym != NULL)
efd66ac6 598 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
599 else if (!ui_out_is_mi_like_p (uiout))
600 ui_out_field_string (uiout, "function", "??");
601
1e038f67 602 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 603 {
8710b709 604 ui_out_text (uiout, _("\tinst "));
23a7fe75 605 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
606 }
607
1e038f67 608 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 609 {
8710b709 610 ui_out_text (uiout, _("\tat "));
23a7fe75 611 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
612 }
613
afedecd3
MM
614 ui_out_text (uiout, "\n");
615 }
616}
617
618/* The to_call_history method of target record-btrace. */
619
620static void
5df2fcba 621record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
622{
623 struct btrace_thread_info *btinfo;
23a7fe75
MM
624 struct btrace_call_history *history;
625 struct btrace_call_iterator begin, end;
afedecd3
MM
626 struct cleanup *uiout_cleanup;
627 struct ui_out *uiout;
23a7fe75 628 unsigned int context, covered;
afedecd3
MM
629
630 uiout = current_uiout;
631 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
632 "insn history");
afedecd3 633 context = abs (size);
afedecd3
MM
634 if (context == 0)
635 error (_("Bad record function-call-history-size."));
636
23a7fe75
MM
637 btinfo = require_btrace ();
638 history = btinfo->call_history;
639 if (history == NULL)
afedecd3 640 {
07bbe694 641 struct btrace_insn_iterator *replay;
afedecd3 642
23a7fe75 643 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 644
07bbe694
MM
645 /* If we're replaying, we start at the replay position. Otherwise, we
646 start at the tail of the trace. */
647 replay = btinfo->replay;
648 if (replay != NULL)
649 {
650 begin.function = replay->function;
651 begin.btinfo = btinfo;
652 }
653 else
654 btrace_call_end (&begin, btinfo);
655
656 /* We start from here and expand in the requested direction. Then we
657 expand in the other direction, as well, to fill up any remaining
658 context. */
659 end = begin;
660 if (size < 0)
661 {
662 /* We want the current position covered, as well. */
663 covered = btrace_call_next (&end, 1);
664 covered += btrace_call_prev (&begin, context - covered);
665 covered += btrace_call_next (&end, context - covered);
666 }
667 else
668 {
669 covered = btrace_call_next (&end, context);
670 covered += btrace_call_prev (&begin, context- covered);
671 }
afedecd3
MM
672 }
673 else
674 {
23a7fe75
MM
675 begin = history->begin;
676 end = history->end;
afedecd3 677
23a7fe75
MM
678 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
679 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 680
23a7fe75
MM
681 if (size < 0)
682 {
683 end = begin;
684 covered = btrace_call_prev (&begin, context);
685 }
686 else
687 {
688 begin = end;
689 covered = btrace_call_next (&end, context);
690 }
afedecd3
MM
691 }
692
23a7fe75 693 if (covered > 0)
8710b709 694 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
695 else
696 {
697 if (size < 0)
698 printf_unfiltered (_("At the start of the branch trace record.\n"));
699 else
700 printf_unfiltered (_("At the end of the branch trace record.\n"));
701 }
afedecd3 702
23a7fe75 703 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
704 do_cleanups (uiout_cleanup);
705}
706
707/* The to_call_history_range method of target record-btrace. */
708
709static void
f0d960ea
TT
710record_btrace_call_history_range (struct target_ops *self,
711 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
712{
713 struct btrace_thread_info *btinfo;
23a7fe75
MM
714 struct btrace_call_history *history;
715 struct btrace_call_iterator begin, end;
afedecd3
MM
716 struct cleanup *uiout_cleanup;
717 struct ui_out *uiout;
23a7fe75
MM
718 unsigned int low, high;
719 int found;
afedecd3
MM
720
721 uiout = current_uiout;
722 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
723 "func history");
23a7fe75
MM
724 low = from;
725 high = to;
afedecd3 726
23a7fe75 727 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
728
729 /* Check for wrap-arounds. */
23a7fe75 730 if (low != from || high != to)
afedecd3
MM
731 error (_("Bad range."));
732
0688d04e 733 if (high < low)
afedecd3
MM
734 error (_("Bad range."));
735
23a7fe75 736 btinfo = require_btrace ();
afedecd3 737
23a7fe75
MM
738 found = btrace_find_call_by_number (&begin, btinfo, low);
739 if (found == 0)
740 error (_("Range out of bounds."));
afedecd3 741
23a7fe75
MM
742 found = btrace_find_call_by_number (&end, btinfo, high);
743 if (found == 0)
0688d04e
MM
744 {
745 /* Silently truncate the range. */
746 btrace_call_end (&end, btinfo);
747 }
748 else
749 {
750 /* We want both begin and end to be inclusive. */
751 btrace_call_next (&end, 1);
752 }
afedecd3 753
8710b709 754 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 755 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
756
757 do_cleanups (uiout_cleanup);
758}
759
760/* The to_call_history_from method of target record-btrace. */
761
762static void
ec0aea04
TT
763record_btrace_call_history_from (struct target_ops *self,
764 ULONGEST from, int size, int flags)
afedecd3
MM
765{
766 ULONGEST begin, end, context;
767
768 context = abs (size);
0688d04e
MM
769 if (context == 0)
770 error (_("Bad record function-call-history-size."));
afedecd3
MM
771
772 if (size < 0)
773 {
774 end = from;
775
776 if (from < context)
777 begin = 0;
778 else
0688d04e 779 begin = from - context + 1;
afedecd3
MM
780 }
781 else
782 {
783 begin = from;
0688d04e 784 end = from + context - 1;
afedecd3
MM
785
786 /* Check for wrap-around. */
787 if (end < begin)
788 end = ULONGEST_MAX;
789 }
790
f0d960ea 791 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
792}
793
07bbe694
MM
794/* The to_record_is_replaying method of target record-btrace. */
795
796static int
1c63c994 797record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
798{
799 struct thread_info *tp;
800
801 ALL_THREADS (tp)
802 if (btrace_is_replaying (tp))
803 return 1;
804
805 return 0;
806}
807
633785ff
MM
808/* The to_xfer_partial method of target record-btrace. */
809
9b409511 810static enum target_xfer_status
633785ff
MM
811record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
812 const char *annex, gdb_byte *readbuf,
813 const gdb_byte *writebuf, ULONGEST offset,
9b409511 814 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
815{
816 struct target_ops *t;
817
818 /* Filter out requests that don't make sense during replay. */
1c63c994 819 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
633785ff
MM
820 {
821 switch (object)
822 {
823 case TARGET_OBJECT_MEMORY:
824 {
825 struct target_section *section;
826
827 /* We do not allow writing memory in general. */
828 if (writebuf != NULL)
9b409511
YQ
829 {
830 *xfered_len = len;
bc113b4e 831 return TARGET_XFER_UNAVAILABLE;
9b409511 832 }
633785ff
MM
833
834 /* We allow reading readonly memory. */
835 section = target_section_by_addr (ops, offset);
836 if (section != NULL)
837 {
838 /* Check if the section we found is readonly. */
839 if ((bfd_get_section_flags (section->the_bfd_section->owner,
840 section->the_bfd_section)
841 & SEC_READONLY) != 0)
842 {
843 /* Truncate the request to fit into this section. */
844 len = min (len, section->endaddr - offset);
845 break;
846 }
847 }
848
9b409511 849 *xfered_len = len;
bc113b4e 850 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
851 }
852 }
853 }
854
855 /* Forward the request. */
856 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
857 if (ops->to_xfer_partial != NULL)
858 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 859 offset, len, xfered_len);
633785ff 860
9b409511 861 *xfered_len = len;
bc113b4e 862 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
863}
864
865/* The to_insert_breakpoint method of target record-btrace. */
866
867static int
868record_btrace_insert_breakpoint (struct target_ops *ops,
869 struct gdbarch *gdbarch,
870 struct bp_target_info *bp_tgt)
871{
872 volatile struct gdb_exception except;
873 int old, ret;
874
875 /* Inserting breakpoints requires accessing memory. Allow it for the
876 duration of this function. */
877 old = record_btrace_allow_memory_access;
878 record_btrace_allow_memory_access = 1;
879
880 ret = 0;
881 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 882 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
883
884 record_btrace_allow_memory_access = old;
885
886 if (except.reason < 0)
887 throw_exception (except);
888
889 return ret;
890}
891
892/* The to_remove_breakpoint method of target record-btrace. */
893
894static int
895record_btrace_remove_breakpoint (struct target_ops *ops,
896 struct gdbarch *gdbarch,
897 struct bp_target_info *bp_tgt)
898{
899 volatile struct gdb_exception except;
900 int old, ret;
901
902 /* Removing breakpoints requires accessing memory. Allow it for the
903 duration of this function. */
904 old = record_btrace_allow_memory_access;
905 record_btrace_allow_memory_access = 1;
906
907 ret = 0;
908 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 909 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
910
911 record_btrace_allow_memory_access = old;
912
913 if (except.reason < 0)
914 throw_exception (except);
915
916 return ret;
917}
918
1f3ef581
MM
919/* The to_fetch_registers method of target record-btrace. */
920
921static void
922record_btrace_fetch_registers (struct target_ops *ops,
923 struct regcache *regcache, int regno)
924{
925 struct btrace_insn_iterator *replay;
926 struct thread_info *tp;
927
928 tp = find_thread_ptid (inferior_ptid);
929 gdb_assert (tp != NULL);
930
931 replay = tp->btrace.replay;
932 if (replay != NULL)
933 {
934 const struct btrace_insn *insn;
935 struct gdbarch *gdbarch;
936 int pcreg;
937
938 gdbarch = get_regcache_arch (regcache);
939 pcreg = gdbarch_pc_regnum (gdbarch);
940 if (pcreg < 0)
941 return;
942
943 /* We can only provide the PC register. */
944 if (regno >= 0 && regno != pcreg)
945 return;
946
947 insn = btrace_insn_get (replay);
948 gdb_assert (insn != NULL);
949
950 regcache_raw_supply (regcache, regno, &insn->pc);
951 }
952 else
953 {
954 struct target_ops *t;
955
956 for (t = ops->beneath; t != NULL; t = t->beneath)
957 if (t->to_fetch_registers != NULL)
958 {
959 t->to_fetch_registers (t, regcache, regno);
960 break;
961 }
962 }
963}
964
965/* The to_store_registers method of target record-btrace. */
966
967static void
968record_btrace_store_registers (struct target_ops *ops,
969 struct regcache *regcache, int regno)
970{
971 struct target_ops *t;
972
1c63c994 973 if (record_btrace_is_replaying (ops))
1f3ef581
MM
974 error (_("This record target does not allow writing registers."));
975
976 gdb_assert (may_write_registers != 0);
977
978 for (t = ops->beneath; t != NULL; t = t->beneath)
979 if (t->to_store_registers != NULL)
980 {
981 t->to_store_registers (t, regcache, regno);
982 return;
983 }
984
985 noprocess ();
986}
987
988/* The to_prepare_to_store method of target record-btrace. */
989
990static void
991record_btrace_prepare_to_store (struct target_ops *ops,
992 struct regcache *regcache)
993{
994 struct target_ops *t;
995
1c63c994 996 if (record_btrace_is_replaying (ops))
1f3ef581
MM
997 return;
998
999 for (t = ops->beneath; t != NULL; t = t->beneath)
1000 if (t->to_prepare_to_store != NULL)
1001 {
1002 t->to_prepare_to_store (t, regcache);
1003 return;
1004 }
1005}
1006
0b722aec
MM
1007/* The branch trace frame cache. */
1008
1009struct btrace_frame_cache
1010{
1011 /* The thread. */
1012 struct thread_info *tp;
1013
1014 /* The frame info. */
1015 struct frame_info *frame;
1016
1017 /* The branch trace function segment. */
1018 const struct btrace_function *bfun;
1019};
1020
1021/* A struct btrace_frame_cache hash table indexed by NEXT. */
1022
1023static htab_t bfcache;
1024
1025/* hash_f for htab_create_alloc of bfcache. */
1026
1027static hashval_t
1028bfcache_hash (const void *arg)
1029{
1030 const struct btrace_frame_cache *cache = arg;
1031
1032 return htab_hash_pointer (cache->frame);
1033}
1034
1035/* eq_f for htab_create_alloc of bfcache. */
1036
1037static int
1038bfcache_eq (const void *arg1, const void *arg2)
1039{
1040 const struct btrace_frame_cache *cache1 = arg1;
1041 const struct btrace_frame_cache *cache2 = arg2;
1042
1043 return cache1->frame == cache2->frame;
1044}
1045
1046/* Create a new btrace frame cache. */
1047
1048static struct btrace_frame_cache *
1049bfcache_new (struct frame_info *frame)
1050{
1051 struct btrace_frame_cache *cache;
1052 void **slot;
1053
1054 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1055 cache->frame = frame;
1056
1057 slot = htab_find_slot (bfcache, cache, INSERT);
1058 gdb_assert (*slot == NULL);
1059 *slot = cache;
1060
1061 return cache;
1062}
1063
1064/* Extract the branch trace function from a branch trace frame. */
1065
1066static const struct btrace_function *
1067btrace_get_frame_function (struct frame_info *frame)
1068{
1069 const struct btrace_frame_cache *cache;
1070 const struct btrace_function *bfun;
1071 struct btrace_frame_cache pattern;
1072 void **slot;
1073
1074 pattern.frame = frame;
1075
1076 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1077 if (slot == NULL)
1078 return NULL;
1079
1080 cache = *slot;
1081 return cache->bfun;
1082}
1083
cecac1ab
MM
1084/* Implement stop_reason method for record_btrace_frame_unwind. */
1085
1086static enum unwind_stop_reason
1087record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1088 void **this_cache)
1089{
0b722aec
MM
1090 const struct btrace_frame_cache *cache;
1091 const struct btrace_function *bfun;
1092
1093 cache = *this_cache;
1094 bfun = cache->bfun;
1095 gdb_assert (bfun != NULL);
1096
1097 if (bfun->up == NULL)
1098 return UNWIND_UNAVAILABLE;
1099
1100 return UNWIND_NO_REASON;
cecac1ab
MM
1101}
1102
1103/* Implement this_id method for record_btrace_frame_unwind. */
1104
1105static void
1106record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1107 struct frame_id *this_id)
1108{
0b722aec
MM
1109 const struct btrace_frame_cache *cache;
1110 const struct btrace_function *bfun;
1111 CORE_ADDR code, special;
1112
1113 cache = *this_cache;
1114
1115 bfun = cache->bfun;
1116 gdb_assert (bfun != NULL);
1117
1118 while (bfun->segment.prev != NULL)
1119 bfun = bfun->segment.prev;
1120
1121 code = get_frame_func (this_frame);
1122 special = bfun->number;
1123
1124 *this_id = frame_id_build_unavailable_stack_special (code, special);
1125
1126 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1127 btrace_get_bfun_name (cache->bfun),
1128 core_addr_to_string_nz (this_id->code_addr),
1129 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1130}
1131
1132/* Implement prev_register method for record_btrace_frame_unwind. */
1133
1134static struct value *
1135record_btrace_frame_prev_register (struct frame_info *this_frame,
1136 void **this_cache,
1137 int regnum)
1138{
0b722aec
MM
1139 const struct btrace_frame_cache *cache;
1140 const struct btrace_function *bfun, *caller;
1141 const struct btrace_insn *insn;
1142 struct gdbarch *gdbarch;
1143 CORE_ADDR pc;
1144 int pcreg;
1145
1146 gdbarch = get_frame_arch (this_frame);
1147 pcreg = gdbarch_pc_regnum (gdbarch);
1148 if (pcreg < 0 || regnum != pcreg)
1149 throw_error (NOT_AVAILABLE_ERROR,
1150 _("Registers are not available in btrace record history"));
1151
1152 cache = *this_cache;
1153 bfun = cache->bfun;
1154 gdb_assert (bfun != NULL);
1155
1156 caller = bfun->up;
1157 if (caller == NULL)
1158 throw_error (NOT_AVAILABLE_ERROR,
1159 _("No caller in btrace record history"));
1160
1161 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1162 {
1163 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1164 pc = insn->pc;
1165 }
1166 else
1167 {
1168 insn = VEC_last (btrace_insn_s, caller->insn);
1169 pc = insn->pc;
1170
1171 pc += gdb_insn_length (gdbarch, pc);
1172 }
1173
1174 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1175 btrace_get_bfun_name (bfun), bfun->level,
1176 core_addr_to_string_nz (pc));
1177
1178 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1179}
1180
1181/* Implement sniffer method for record_btrace_frame_unwind. */
1182
1183static int
1184record_btrace_frame_sniffer (const struct frame_unwind *self,
1185 struct frame_info *this_frame,
1186 void **this_cache)
1187{
0b722aec
MM
1188 const struct btrace_function *bfun;
1189 struct btrace_frame_cache *cache;
cecac1ab 1190 struct thread_info *tp;
0b722aec 1191 struct frame_info *next;
cecac1ab
MM
1192
1193 /* THIS_FRAME does not contain a reference to its thread. */
1194 tp = find_thread_ptid (inferior_ptid);
1195 gdb_assert (tp != NULL);
1196
0b722aec
MM
1197 bfun = NULL;
1198 next = get_next_frame (this_frame);
1199 if (next == NULL)
1200 {
1201 const struct btrace_insn_iterator *replay;
1202
1203 replay = tp->btrace.replay;
1204 if (replay != NULL)
1205 bfun = replay->function;
1206 }
1207 else
1208 {
1209 const struct btrace_function *callee;
1210
1211 callee = btrace_get_frame_function (next);
1212 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1213 bfun = callee->up;
1214 }
1215
1216 if (bfun == NULL)
1217 return 0;
1218
1219 DEBUG ("[frame] sniffed frame for %s on level %d",
1220 btrace_get_bfun_name (bfun), bfun->level);
1221
1222 /* This is our frame. Initialize the frame cache. */
1223 cache = bfcache_new (this_frame);
1224 cache->tp = tp;
1225 cache->bfun = bfun;
1226
1227 *this_cache = cache;
1228 return 1;
1229}
1230
1231/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1232
1233static int
1234record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1235 struct frame_info *this_frame,
1236 void **this_cache)
1237{
1238 const struct btrace_function *bfun, *callee;
1239 struct btrace_frame_cache *cache;
1240 struct frame_info *next;
1241
1242 next = get_next_frame (this_frame);
1243 if (next == NULL)
1244 return 0;
1245
1246 callee = btrace_get_frame_function (next);
1247 if (callee == NULL)
1248 return 0;
1249
1250 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1251 return 0;
1252
1253 bfun = callee->up;
1254 if (bfun == NULL)
1255 return 0;
1256
1257 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1258 btrace_get_bfun_name (bfun), bfun->level);
1259
1260 /* This is our frame. Initialize the frame cache. */
1261 cache = bfcache_new (this_frame);
1262 cache->tp = find_thread_ptid (inferior_ptid);
1263 cache->bfun = bfun;
1264
1265 *this_cache = cache;
1266 return 1;
1267}
1268
1269static void
1270record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1271{
1272 struct btrace_frame_cache *cache;
1273 void **slot;
1274
1275 cache = this_cache;
1276
1277 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1278 gdb_assert (slot != NULL);
1279
1280 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1281}
1282
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1299
/* The tail call variant of the btrace unwinder.  Like
   record_btrace_frame_unwind, it reports all registers except the
   reconstructed PC as <unavailable>.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1310
ac01945b
TT
/* Implement the to_get_unwinder method.  Return the unwinder used for
   normal frames while replaying.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1318
/* Implement the to_get_tailcall_unwinder method.  Return the unwinder
   used for tail call frames while replaying.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1326
52834460
MM
1327/* Indicate that TP should be resumed according to FLAG. */
1328
1329static void
1330record_btrace_resume_thread (struct thread_info *tp,
1331 enum btrace_thread_flag flag)
1332{
1333 struct btrace_thread_info *btinfo;
1334
1335 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1336
1337 btinfo = &tp->btrace;
1338
1339 if ((btinfo->flags & BTHR_MOVE) != 0)
1340 error (_("Thread already moving."));
1341
1342 /* Fetch the latest branch trace. */
1343 btrace_fetch (tp);
1344
1345 btinfo->flags |= flag;
1346}
1347
1348/* Find the thread to resume given a PTID. */
1349
1350static struct thread_info *
1351record_btrace_find_resume_thread (ptid_t ptid)
1352{
1353 struct thread_info *tp;
1354
1355 /* When asked to resume everything, we pick the current thread. */
1356 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1357 ptid = inferior_ptid;
1358
1359 return find_thread_ptid (ptid);
1360}
1361
/* Start replaying a thread.  Allocate and install TP's replay iterator,
   positioned at the end of its branch trace (the current instruction).
   Return the iterator, or NULL if TP has no trace.  On error, the
   partial setup is undone and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On failure, undo the replay setup before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1448
1449/* Stop replaying a thread. */
1450
1451static void
1452record_btrace_stop_replaying (struct thread_info *tp)
1453{
1454 struct btrace_thread_info *btinfo;
1455
1456 btinfo = &tp->btrace;
1457
1458 xfree (btinfo->replay);
1459 btinfo->replay = NULL;
1460
1461 /* Make sure we're not leaving any stale registers. */
1462 registers_changed_ptid (tp->ptid);
1463}
1464
b2f4cfde
MM
/* The to_resume method of target record-btrace.

   When not replaying, delegate to the target beneath.  Otherwise,
   translate STEP and the execution direction into a btrace thread flag
   and record the move intent; the actual stepping happens in
   record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}
1511
/* Find a thread to move.  Prefer the thread matching PTID if it has a
   pending move request; otherwise pick any thread that has one.
   Return NULL if no thread needs moving.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1531
1532/* Return a target_waitstatus indicating that we ran out of history. */
1533
1534static struct target_waitstatus
1535btrace_step_no_history (void)
1536{
1537 struct target_waitstatus status;
1538
1539 status.kind = TARGET_WAITKIND_NO_HISTORY;
1540
1541 return status;
1542}
1543
1544/* Return a target_waitstatus indicating that a step finished. */
1545
1546static struct target_waitstatus
1547btrace_step_stopped (void)
1548{
1549 struct target_waitstatus status;
1550
1551 status.kind = TARGET_WAITKIND_STOPPED;
1552 status.value.sig = GDB_SIGNAL_TRAP;
1553
1554 return status;
1555}
1556
1557/* Clear the record histories. */
1558
1559static void
1560record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1561{
1562 xfree (btinfo->insn_history);
1563 xfree (btinfo->call_history);
1564
1565 btinfo->insn_history = NULL;
1566 btinfo->call_history = NULL;
1567}
1568
/* Step a single thread according to its pending move request (BTHR_STEP,
   BTHR_RSTEP, BTHR_CONT or BTHR_RCONT).  Consume the request, move the
   thread's replay iterator and return the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so it is not processed twice.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We need the address space to check for breakpoints.  */
      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* We need the address space to check for breakpoints.  */
      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1694
/* The to_wait method of target record-btrace.

   When not replaying, delegate to the target beneath.  Otherwise, move
   one thread with a pending move request (see record_btrace_resume) and
   report the result.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* Nothing to do; tell the caller to try again later.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1741
/* The to_can_execute_reverse method of target record-btrace.
   Reverse execution is always possible via the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1749
/* The to_decr_pc_after_break method of target record-btrace.
   Return 0 while replaying; otherwise delegate to the target beneath.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}
1763
e2887aa3
MM
1764/* The to_find_new_threads method of target record-btrace. */
1765
1766static void
1767record_btrace_find_new_threads (struct target_ops *ops)
1768{
1769 /* Don't expect new threads if we're replaying. */
1c63c994 1770 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1771 return;
1772
1773 /* Forward the request. */
1774 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1775 if (ops->to_find_new_threads != NULL)
1776 {
1777 ops->to_find_new_threads (ops);
1778 break;
1779 }
1780}
1781
1782/* The to_thread_alive method of target record-btrace. */
1783
1784static int
1785record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1786{
1787 /* We don't add or remove threads during replay. */
1c63c994 1788 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1789 return find_thread_ptid (ptid) != NULL;
1790
1791 /* Forward the request. */
1792 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1793 if (ops->to_thread_alive != NULL)
1794 return ops->to_thread_alive (ops, ptid);
1795
1796 return 0;
1797}
1798
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Otherwise, start replaying (if necessary) and position the
   replay iterator at *IT.  Invalidates registers and record histories.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1826
1827/* The to_goto_record_begin method of target record-btrace. */
1828
1829static void
08475817 1830record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1831{
1832 struct thread_info *tp;
1833 struct btrace_insn_iterator begin;
1834
1835 tp = require_btrace_thread ();
1836
1837 btrace_insn_begin (&begin, &tp->btrace);
1838 record_btrace_set_replay (tp, &begin);
1839
1840 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1841}
1842
1843/* The to_goto_record_end method of target record-btrace. */
1844
1845static void
307a1b91 1846record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1847{
1848 struct thread_info *tp;
1849
1850 tp = require_btrace_thread ();
1851
1852 record_btrace_set_replay (tp, NULL);
1853
1854 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1855}
1856
1857/* The to_goto_record method of target record-btrace. */
1858
1859static void
606183ac 1860record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
1861{
1862 struct thread_info *tp;
1863 struct btrace_insn_iterator it;
1864 unsigned int number;
1865 int found;
1866
1867 number = insn;
1868
1869 /* Check for wrap-arounds. */
1870 if (number != insn)
1871 error (_("Instruction number out of range."));
1872
1873 tp = require_btrace_thread ();
1874
1875 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1876 if (found == 0)
1877 error (_("No such instruction."));
1878
1879 record_btrace_set_replay (tp, &it);
1880
1881 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1882}
1883
afedecd3
MM
/* Initialize the record-btrace target ops.  Fill in record_btrace_ops
   with the methods implemented above; unset methods are delegated to
   the target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory and breakpoints during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  /* Registers during replay.  */
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  /* Frame unwinding during replay.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  /* Navigation within the recorded history.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1930
1931/* Alias for "target record". */
1932
1933static void
1934cmd_record_btrace_start (char *args, int from_tty)
1935{
1936 if (args != NULL && *args != 0)
1937 error (_("Invalid argument."));
1938
1939 execute_command ("target record-btrace", from_tty);
1940}
1941
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  Register the "record btrace" command and
   its "record b" alias, set up the target ops, and create the frame
   cache used by the btrace unwinders.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The cache maps frames to btrace functions; see bfcache_new.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.285481 seconds and 4 git commands to generate.