gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
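
/* Usage example (added comment, not in the original source): a call such as
   DEBUG ("resume %s", target_pid_to_str (ptid)) prints
   "[record-btrace] resume <ptid>" to gdb_stdlog, and only does so when the
   record_debug knob ("set debug record") is non-zero.  */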

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}
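
/* Note for readers (added comment, not in the original source): this
   function backs the "record btrace" / "target record-btrace" commands.
   As the loop above shows, ARGS may name specific threads (checked with
   number_is_in_list); with no arguments, tracing is enabled for every
   non-exited thread, and the cleanup chain disables it again on error.  */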

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
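
/* Worked example (added comment, not in the original source): a size of
   4096 is an exact multiple of 1024, so *size becomes 4 and "kB" is
   returned; a size of 4097 matches none of the masks, so it is left
   unchanged and the empty suffix is returned.  Sizes are only reduced
   when they are an exact multiple of the respective power of two.  */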

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel(R) Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %d (%s).\n"), insns, calls, gaps,
                     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          char prefix[4];

          /* We may add a speculation prefix later.  We use the same space
             that is used for the pc prefix.  */
          if ((flags & DISASSEMBLY_OMIT_PC) == 0)
            strncpy (prefix, pc_prefix (insn->pc), 3);
          else
            {
              prefix[0] = ' ';
              prefix[1] = ' ';
              prefix[2] = ' ';
            }
          prefix[3] = 0;

          /* Print the instruction index.  */
          ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
          ui_out_text (uiout, "\t");

          /* Indicate speculative execution by a leading '?'.  */
          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            prefix[0] = '?';

          /* Print the prefix; we tell gdb_disassembly below to omit it.  */
          ui_out_field_fmt (uiout, "prefix", "%s", prefix);

          /* Disassembly with '/m' flag may not produce the expected result.
             See PR gdb/11833.  */
          gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
                           1, insn->pc, insn->pc + 1);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
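
/* Worked example (added comment, not in the original source): for a
   request with FROM 10 and SIZE -5, CONTEXT is 5 and the inclusive range
   becomes [6; 10]; for SIZE 5 it becomes [10; 14].  Ranges that would
   wrap around are clamped to 0 and ULONGEST_MAX, respectively.  */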

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
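
/* Note for readers (added comment, not in the original source): the
   read-only filter above is what the "set record btrace
   replay-memory-access" setting controls.  With the default "read-only",
   writes and reads of non-SEC_READONLY sections report
   TARGET_XFER_UNAVAILABLE while replaying; "read-write" forwards all
   requests to the target beneath.  */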

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
         execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          {
            if (ptid_match (tp->ptid, inferior_ptid))
              record_btrace_resume_thread (tp, flag);
            else
              record_btrace_resume_thread (tp, cflag);
          }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}

/* Cancel resuming TP.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Return a target_waitstatus indicating that a thread was stopped as
   requested.  */

static struct target_waitstatus
btrace_step_stopped_on_request (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_0;

  return status;
}

/* Return a target_waitstatus indicating a spurious stop.  */

static struct target_waitstatus
btrace_step_spurious (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_SPURIOUS;

  return status;
}

/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_RESUMED;

  return status;
}

/* Return a target_waitstatus indicating that we should wait again.  */

static struct target_waitstatus
btrace_step_again (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_IGNORE;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Check whether TP's current replay position is at a breakpoint.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
                                             &btinfo->stop_reason);
}
2093
2094 /* Step one instruction in forward direction. */
2095
2096 static struct target_waitstatus
2097 record_btrace_single_step_forward (struct thread_info *tp)
2098 {
2099 struct btrace_insn_iterator *replay, end;
2100 struct btrace_thread_info *btinfo;
2101
2102 btinfo = &tp->btrace;
2103 replay = btinfo->replay;
2104
2105 /* We're done if we're not replaying. */
2106 if (replay == NULL)
2107 return btrace_step_no_history ();
2108
2109 /* Check if we're stepping a breakpoint. */
2110 if (record_btrace_replay_at_breakpoint (tp))
2111 return btrace_step_stopped ();
2112
2113 /* Skip gaps during replay. */
2114 do
2115 {
2116 unsigned int steps;
2117
2118 /* We will bail out here if we continue stepping after reaching the end
2119 of the execution history. */
2120 steps = btrace_insn_next (replay, 1);
2121 if (steps == 0)
2122 return btrace_step_no_history ();
2123 }
2124 while (btrace_insn_get (replay) == NULL);
2125
2126 /* Determine the end of the instruction trace. */
2127 btrace_insn_end (&end, btinfo);
2128
2129 /* The execution trace contains (and ends with) the current instruction.
2130 This instruction has not yet been executed, so the trace really ends
2131 one instruction earlier. */
2132 if (btrace_insn_cmp (replay, &end) == 0)
2133 return btrace_step_no_history ();
2134
2135 return btrace_step_spurious ();
2136 }
2137
2138 /* Step one instruction in the backward direction. */
2139
2140 static struct target_waitstatus
2141 record_btrace_single_step_backward (struct thread_info *tp)
2142 {
2143 struct btrace_insn_iterator *replay;
2144 struct btrace_thread_info *btinfo;
2145
2146 btinfo = &tp->btrace;
2147 replay = btinfo->replay;
2148
2149 /* Start replaying if we're not already doing so. */
2150 if (replay == NULL)
2151 replay = record_btrace_start_replaying (tp);
2152
2153 /* If we can't step any further, we reached the beginning of the history.
2154 Skip gaps during replay. */
2155 do
2156 {
2157 unsigned int steps;
2158
2159 steps = btrace_insn_prev (replay, 1);
2160 if (steps == 0)
2161 return btrace_step_no_history ();
2162 }
2163 while (btrace_insn_get (replay) == NULL);
2164
2165 /* Check if we're stepping a breakpoint.
2166
2167 For reverse-stepping, this check is after the step. There is logic in
2168 infrun.c that handles reverse-stepping separately. See, for example,
2169 proceed and adjust_pc_after_break.
2170
2171 This code assumes that for reverse-stepping, PC points to the last
2172 de-executed instruction, whereas for forward-stepping PC points to the
2173 next to-be-executed instruction. */
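/* For illustration, using a hypothetical instruction at address 0x1000:
   after reverse-stepping it, PC is 0x1000, the just de-executed
   instruction; after forward-stepping it, PC is the address of the
   following instruction.  */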
2174 if (record_btrace_replay_at_breakpoint (tp))
2175 return btrace_step_stopped ();
2176
2177 return btrace_step_spurious ();
2178 }
2179
2180 /* Step a single thread. */
2181
2182 static struct target_waitstatus
2183 record_btrace_step_thread (struct thread_info *tp)
2184 {
2185 struct btrace_thread_info *btinfo;
2186 struct target_waitstatus status;
2187 enum btrace_thread_flag flags;
2188
2189 btinfo = &tp->btrace;
2190
2191 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2192 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2193
2194 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2195 target_pid_to_str (tp->ptid), flags,
2196 btrace_thread_flag_to_str (flags));
2197
2198 /* We can't step without an execution history. */
2199 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2200 return btrace_step_no_history ();
2201
2202 switch (flags)
2203 {
2204 default:
2205 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2206
2207 case BTHR_STOP:
2208 return btrace_step_stopped_on_request ();
2209
2210 case BTHR_STEP:
2211 status = record_btrace_single_step_forward (tp);
2212 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2213 break;
2214
2215 return btrace_step_stopped ();
2216
2217 case BTHR_RSTEP:
2218 status = record_btrace_single_step_backward (tp);
2219 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2220 break;
2221
2222 return btrace_step_stopped ();
2223
2224 case BTHR_CONT:
2225 status = record_btrace_single_step_forward (tp);
2226 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2227 break;
2228
2229 btinfo->flags |= flags;
2230 return btrace_step_again ();
2231
2232 case BTHR_RCONT:
2233 status = record_btrace_single_step_backward (tp);
2234 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2235 break;
2236
2237 btinfo->flags |= flags;
2238 return btrace_step_again ();
2239 }
2240
2241 /* We keep threads moving at the end of their execution history. The to_wait
2242 method will stop the thread for which the event is reported. */
2243 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2244 btinfo->flags |= flags;
2245
2246 return status;
2247 }
2248
2249 /* A vector of threads. */
2250
2251 typedef struct thread_info * tp_t;
2252 DEF_VEC_P (tp_t);
2253
2254 /* Announce further events if necessary. */
2255
2256 static void
2257 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2258 const VEC (tp_t) *no_history)
2259 {
2260 int more_moving, more_no_history;
2261
2262 more_moving = !VEC_empty (tp_t, moving);
2263 more_no_history = !VEC_empty (tp_t, no_history);
2264
2265 if (!more_moving && !more_no_history)
2266 return;
2267
2268 if (more_moving)
2269 DEBUG ("movers pending");
2270
2271 if (more_no_history)
2272 DEBUG ("no-history pending");
2273
2274 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2275 }
2276
2277 /* The to_wait method of target record-btrace. */
2278
2279 static ptid_t
2280 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2281 struct target_waitstatus *status, int options)
2282 {
2283 VEC (tp_t) *moving, *no_history;
2284 struct thread_info *tp, *eventing;
2285 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2286
2287 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2288
2289 /* As long as we're not replaying, just forward the request. */
2290 if ((execution_direction != EXEC_REVERSE)
2291 && !record_btrace_is_replaying (ops, minus_one_ptid))
2292 {
2293 ops = ops->beneath;
2294 return ops->to_wait (ops, ptid, status, options);
2295 }
2296
2297 moving = NULL;
2298 no_history = NULL;
2299
2300 make_cleanup (VEC_cleanup (tp_t), &moving);
2301 make_cleanup (VEC_cleanup (tp_t), &no_history);
2302
2303 /* Keep a work list of moving threads. */
2304 ALL_NON_EXITED_THREADS (tp)
2305 if (ptid_match (tp->ptid, ptid)
2306 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2307 VEC_safe_push (tp_t, moving, tp);
2308
2309 if (VEC_empty (tp_t, moving))
2310 {
2311 *status = btrace_step_no_resumed ();
2312
2313 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2314 target_waitstatus_to_string (status));
2315
2316 do_cleanups (cleanups);
2317 return null_ptid;
2318 }
2319
2320 /* Step moving threads one by one, one step each, until either one thread
2321 reports an event or we run out of threads to step.
2322
2323 When stepping more than one thread, chances are that some threads reach
2324 the end of their execution history earlier than others. If we reported
2325 this immediately, all-stop on top of non-stop would stop all threads and
2326 resume the same threads next time. And we would report the same thread
2327 having reached the end of its execution history again.
2328
2329 In the worst case, this would starve the other threads. But even if other
2330 threads would be allowed to make progress, this would result in far too
2331 many intermediate stops.
2332
2333 We therefore delay the reporting of "no execution history" until we have
2334 nothing else to report. By this time, all threads should have moved to
2335 either the beginning or the end of their execution history. There will
2336 be a single user-visible stop. */
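/* As a hypothetical example: if threads A and B are both reverse-continued
   and A reaches the beginning of its execution history while B can still
   move, A is parked in NO_HISTORY and we keep stepping B. The "no more
   history" stop is reported only once there is nothing else to report.  */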
2337 eventing = NULL;
2338 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2339 {
2340 unsigned int ix;
2341
2342 ix = 0;
2343 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2344 {
2345 *status = record_btrace_step_thread (tp);
2346
2347 switch (status->kind)
2348 {
2349 case TARGET_WAITKIND_IGNORE:
2350 ix++;
2351 break;
2352
2353 case TARGET_WAITKIND_NO_HISTORY:
2354 VEC_safe_push (tp_t, no_history,
2355 VEC_ordered_remove (tp_t, moving, ix));
2356 break;
2357
2358 default:
2359 eventing = VEC_unordered_remove (tp_t, moving, ix);
2360 break;
2361 }
2362 }
2363 }
2364
2365 if (eventing == NULL)
2366 {
2367 /* We started with at least one moving thread. This thread must have
2368 either stopped or reached the end of its execution history.
2369
2370 In the former case, EVENTING would not be NULL. Since it is NULL
2371 here, we are in the latter case: NO_HISTORY must not be empty. */
2372 gdb_assert (!VEC_empty (tp_t, no_history));
2373
2374 /* We kept threads moving at the end of their execution history. Stop
2375 EVENTING now that we are going to report its stop. */
2376 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2377 eventing->btrace.flags &= ~BTHR_MOVE;
2378
2379 *status = btrace_step_no_history ();
2380 }
2381
2382 gdb_assert (eventing != NULL);
2383
2384 /* We kept threads replaying at the end of their execution history. Stop
2385 replaying EVENTING now that we are going to report its stop. */
2386 record_btrace_stop_replaying_at_end (eventing);
2387
2388 /* Stop all other threads. */
2389 if (!target_is_non_stop_p ())
2390 ALL_NON_EXITED_THREADS (tp)
2391 record_btrace_cancel_resume (tp);
2392
2393 /* In async mode, we need to announce further events. */
2394 if (target_is_async_p ())
2395 record_btrace_maybe_mark_async_event (moving, no_history);
2396
2397 /* Start record histories anew from the current position. */
2398 record_btrace_clear_histories (&eventing->btrace);
2399
2400 /* We moved the replay position but did not update registers. */
2401 registers_changed_ptid (eventing->ptid);
2402
2403 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2404 target_pid_to_str (eventing->ptid),
2405 target_waitstatus_to_string (status));
2406
2407 do_cleanups (cleanups);
2408 return eventing->ptid;
2409 }
2410
2411 /* The to_stop method of target record-btrace. */
2412
2413 static void
2414 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2415 {
2416 DEBUG ("stop %s", target_pid_to_str (ptid));
2417
2418 /* As long as we're not replaying, just forward the request. */
2419 if ((execution_direction != EXEC_REVERSE)
2420 && !record_btrace_is_replaying (ops, minus_one_ptid))
2421 {
2422 ops = ops->beneath;
2423 ops->to_stop (ops, ptid);
2424 }
2425 else
2426 {
2427 struct thread_info *tp;
2428
2429 ALL_NON_EXITED_THREADS (tp)
2430 if (ptid_match (tp->ptid, ptid))
2431 {
2432 tp->btrace.flags &= ~BTHR_MOVE;
2433 tp->btrace.flags |= BTHR_STOP;
2434 }
2435 }
2436 }
2437
2438 /* The to_can_execute_reverse method of target record-btrace. */
2439
2440 static int
2441 record_btrace_can_execute_reverse (struct target_ops *self)
2442 {
2443 return 1;
2444 }
2445
2446 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2447
2448 static int
2449 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2450 {
2451 if (record_btrace_is_replaying (ops, minus_one_ptid))
2452 {
2453 struct thread_info *tp = inferior_thread ();
2454
2455 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2456 }
2457
2458 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2459 }
2460
2461 /* The to_supports_stopped_by_sw_breakpoint method of target
2462 record-btrace. */
2463
2464 static int
2465 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2466 {
2467 if (record_btrace_is_replaying (ops, minus_one_ptid))
2468 return 1;
2469
2470 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2471 }
2472
2473 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2474
2475 static int
2476 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2477 {
2478 if (record_btrace_is_replaying (ops, minus_one_ptid))
2479 {
2480 struct thread_info *tp = inferior_thread ();
2481
2482 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2483 }
2484
2485 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2486 }
2487
2488 /* The to_supports_stopped_by_hw_breakpoint method of target
2489 record-btrace. */
2490
2491 static int
2492 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2493 {
2494 if (record_btrace_is_replaying (ops, minus_one_ptid))
2495 return 1;
2496
2497 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2498 }
2499
2500 /* The to_update_thread_list method of target record-btrace. */
2501
2502 static void
2503 record_btrace_update_thread_list (struct target_ops *ops)
2504 {
2505 /* We don't add or remove threads during replay. */
2506 if (record_btrace_is_replaying (ops, minus_one_ptid))
2507 return;
2508
2509 /* Forward the request. */
2510 ops = ops->beneath;
2511 ops->to_update_thread_list (ops);
2512 }
2513
2514 /* The to_thread_alive method of target record-btrace. */
2515
2516 static int
2517 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2518 {
2519 /* We don't add or remove threads during replay. */
2520 if (record_btrace_is_replaying (ops, minus_one_ptid))
2521 return find_thread_ptid (ptid) != NULL;
2522
2523 /* Forward the request. */
2524 ops = ops->beneath;
2525 return ops->to_thread_alive (ops, ptid);
2526 }
2527
2528 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2529 is stopped. */
2530
2531 static void
2532 record_btrace_set_replay (struct thread_info *tp,
2533 const struct btrace_insn_iterator *it)
2534 {
2535 struct btrace_thread_info *btinfo;
2536
2537 btinfo = &tp->btrace;
2538
2539 if (it == NULL || it->function == NULL)
2540 record_btrace_stop_replaying (tp);
2541 else
2542 {
2543 if (btinfo->replay == NULL)
2544 record_btrace_start_replaying (tp);
2545 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2546 return;
2547
2548 *btinfo->replay = *it;
2549 registers_changed_ptid (tp->ptid);
2550 }
2551
2552 /* Start anew from the new replay position. */
2553 record_btrace_clear_histories (btinfo);
2554
2555 stop_pc = regcache_read_pc (get_current_regcache ());
2556 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2557 }
2558
2559 /* The to_goto_record_begin method of target record-btrace. */
2560
2561 static void
2562 record_btrace_goto_begin (struct target_ops *self)
2563 {
2564 struct thread_info *tp;
2565 struct btrace_insn_iterator begin;
2566
2567 tp = require_btrace_thread ();
2568
2569 btrace_insn_begin (&begin, &tp->btrace);
2570 record_btrace_set_replay (tp, &begin);
2571 }
2572
2573 /* The to_goto_record_end method of target record-btrace. */
2574
2575 static void
2576 record_btrace_goto_end (struct target_ops *ops)
2577 {
2578 struct thread_info *tp;
2579
2580 tp = require_btrace_thread ();
2581
2582 record_btrace_set_replay (tp, NULL);
2583 }
2584
2585 /* The to_goto_record method of target record-btrace. */
2586
2587 static void
2588 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2589 {
2590 struct thread_info *tp;
2591 struct btrace_insn_iterator it;
2592 unsigned int number;
2593 int found;
2594
2595 number = insn;
2596
2597 /* Check for wrap-arounds. */
2598 if (number != insn)
2599 error (_("Instruction number out of range."));
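/* NUMBER is an unsigned int while INSN is a ULONGEST, so the assignment
   above may truncate. For example, with a 32-bit unsigned int, an INSN
   of 0x100000000 would truncate to 0 and be rejected here.  */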
2600
2601 tp = require_btrace_thread ();
2602
2603 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2604 if (found == 0)
2605 error (_("No such instruction."));
2606
2607 record_btrace_set_replay (tp, &it);
2608 }
2609
2610 /* The to_record_stop_replaying method of target record-btrace. */
2611
2612 static void
2613 record_btrace_stop_replaying_all (struct target_ops *self)
2614 {
2615 struct thread_info *tp;
2616
2617 ALL_NON_EXITED_THREADS (tp)
2618 record_btrace_stop_replaying (tp);
2619 }
2620
2621 /* The to_execution_direction target method. */
2622
2623 static enum exec_direction_kind
2624 record_btrace_execution_direction (struct target_ops *self)
2625 {
2626 return record_btrace_resume_exec_dir;
2627 }
2628
2629 /* The to_prepare_to_generate_core target method. */
2630
2631 static void
2632 record_btrace_prepare_to_generate_core (struct target_ops *self)
2633 {
2634 record_btrace_generating_corefile = 1;
2635 }
2636
2637 /* The to_done_generating_core target method. */
2638
2639 static void
2640 record_btrace_done_generating_core (struct target_ops *self)
2641 {
2642 record_btrace_generating_corefile = 0;
2643 }
2644
2645 /* Initialize the record-btrace target ops. */
2646
2647 static void
2648 init_record_btrace_ops (void)
2649 {
2650 struct target_ops *ops;
2651
2652 ops = &record_btrace_ops;
2653 ops->to_shortname = "record-btrace";
2654 ops->to_longname = "Branch tracing target";
2655 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2656 ops->to_open = record_btrace_open;
2657 ops->to_close = record_btrace_close;
2658 ops->to_async = record_btrace_async;
2659 ops->to_detach = record_detach;
2660 ops->to_disconnect = record_disconnect;
2661 ops->to_mourn_inferior = record_mourn_inferior;
2662 ops->to_kill = record_kill;
2663 ops->to_stop_recording = record_btrace_stop_recording;
2664 ops->to_info_record = record_btrace_info;
2665 ops->to_insn_history = record_btrace_insn_history;
2666 ops->to_insn_history_from = record_btrace_insn_history_from;
2667 ops->to_insn_history_range = record_btrace_insn_history_range;
2668 ops->to_call_history = record_btrace_call_history;
2669 ops->to_call_history_from = record_btrace_call_history_from;
2670 ops->to_call_history_range = record_btrace_call_history_range;
2671 ops->to_record_is_replaying = record_btrace_is_replaying;
2672 ops->to_record_will_replay = record_btrace_will_replay;
2673 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2674 ops->to_xfer_partial = record_btrace_xfer_partial;
2675 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2676 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2677 ops->to_fetch_registers = record_btrace_fetch_registers;
2678 ops->to_store_registers = record_btrace_store_registers;
2679 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2680 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2681 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2682 ops->to_resume = record_btrace_resume;
2683 ops->to_wait = record_btrace_wait;
2684 ops->to_stop = record_btrace_stop;
2685 ops->to_update_thread_list = record_btrace_update_thread_list;
2686 ops->to_thread_alive = record_btrace_thread_alive;
2687 ops->to_goto_record_begin = record_btrace_goto_begin;
2688 ops->to_goto_record_end = record_btrace_goto_end;
2689 ops->to_goto_record = record_btrace_goto;
2690 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2691 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2692 ops->to_supports_stopped_by_sw_breakpoint
2693 = record_btrace_supports_stopped_by_sw_breakpoint;
2694 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2695 ops->to_supports_stopped_by_hw_breakpoint
2696 = record_btrace_supports_stopped_by_hw_breakpoint;
2697 ops->to_execution_direction = record_btrace_execution_direction;
2698 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2699 ops->to_done_generating_core = record_btrace_done_generating_core;
2700 ops->to_stratum = record_stratum;
2701 ops->to_magic = OPS_MAGIC;
2702 }
2703
2704 /* Start recording in BTS format. */
2705
2706 static void
2707 cmd_record_btrace_bts_start (char *args, int from_tty)
2708 {
2709 if (args != NULL && *args != 0)
2710 error (_("Invalid argument."));
2711
2712 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2713
2714 TRY
2715 {
2716 execute_command ("target record-btrace", from_tty);
2717 }
2718 CATCH (exception, RETURN_MASK_ALL)
2719 {
2720 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2721 throw_exception (exception);
2722 }
2723 END_CATCH
2724 }
2725
2726 /* Start recording in Intel(R) Processor Trace format. */
2727
2728 static void
2729 cmd_record_btrace_pt_start (char *args, int from_tty)
2730 {
2731 if (args != NULL && *args != 0)
2732 error (_("Invalid argument."));
2733
2734 record_btrace_conf.format = BTRACE_FORMAT_PT;
2735
2736 TRY
2737 {
2738 execute_command ("target record-btrace", from_tty);
2739 }
2740 CATCH (exception, RETURN_MASK_ALL)
2741 {
2742 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2743 throw_exception (exception);
2744 }
2745 END_CATCH
2746 }
2747
2748 /* Alias for "target record": try the PT format first, then fall back to BTS. */
2749
2750 static void
2751 cmd_record_btrace_start (char *args, int from_tty)
2752 {
2753 if (args != NULL && *args != 0)
2754 error (_("Invalid argument."));
2755
2756 record_btrace_conf.format = BTRACE_FORMAT_PT;
2757
2758 TRY
2759 {
2760 execute_command ("target record-btrace", from_tty);
2761 }
2762 CATCH (exception, RETURN_MASK_ALL)
2763 {
2764 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2765
2766 TRY
2767 {
2768 execute_command ("target record-btrace", from_tty);
2769 }
2770 CATCH (exception, RETURN_MASK_ALL)
2771 {
2772 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2773 throw_exception (exception);
2774 }
2775 END_CATCH
2776 }
2777 END_CATCH
2778 }
2779
2780 /* The "set record btrace" command. */
2781
2782 static void
2783 cmd_set_record_btrace (char *args, int from_tty)
2784 {
2785 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2786 }
2787
2788 /* The "show record btrace" command. */
2789
2790 static void
2791 cmd_show_record_btrace (char *args, int from_tty)
2792 {
2793 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2794 }
2795
2796 /* The "show record btrace replay-memory-access" command. */
2797
2798 static void
2799 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2800 struct cmd_list_element *c, const char *value)
2801 {
2802 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2803 replay_memory_access);
2804 }
2805
2806 /* The "set record btrace bts" command. */
2807
2808 static void
2809 cmd_set_record_btrace_bts (char *args, int from_tty)
2810 {
2811 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2812 "by an appropriate subcommand.\n"));
2813 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2814 all_commands, gdb_stdout);
2815 }
2816
2817 /* The "show record btrace bts" command. */
2818
2819 static void
2820 cmd_show_record_btrace_bts (char *args, int from_tty)
2821 {
2822 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2823 }
2824
2825 /* The "set record btrace pt" command. */
2826
2827 static void
2828 cmd_set_record_btrace_pt (char *args, int from_tty)
2829 {
2830 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2831 "by an appropriate subcommand.\n"));
2832 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2833 all_commands, gdb_stdout);
2834 }
2835
2836 /* The "show record btrace pt" command. */
2837
2838 static void
2839 cmd_show_record_btrace_pt (char *args, int from_tty)
2840 {
2841 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2842 }
2843
2844 /* The "record bts buffer-size" show value function. */
2845
2846 static void
2847 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2848 struct cmd_list_element *c,
2849 const char *value)
2850 {
2851 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2852 value);
2853 }
2854
2855 /* The "record pt buffer-size" show value function. */
2856
2857 static void
2858 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2859 struct cmd_list_element *c,
2860 const char *value)
2861 {
2862 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2863 value);
2864 }
2865
2866 void _initialize_record_btrace (void);
2867
2868 /* Initialize btrace commands. */
2869
2870 void
2871 _initialize_record_btrace (void)
2872 {
2873 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2874 _("Start branch trace recording."), &record_btrace_cmdlist,
2875 "record btrace ", 0, &record_cmdlist);
2876 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2877
2878 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2879 _("\
2880 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2881 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2882 This format may not be available on all processors."),
2883 &record_btrace_cmdlist);
2884 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2885
2886 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2887 _("\
2888 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2889 This format may not be available on all processors."),
2890 &record_btrace_cmdlist);
2891 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2892
2893 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2894 _("Set record options"), &set_record_btrace_cmdlist,
2895 "set record btrace ", 0, &set_record_cmdlist);
2896
2897 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2898 _("Show record options"), &show_record_btrace_cmdlist,
2899 "show record btrace ", 0, &show_record_cmdlist);
2900
2901 add_setshow_enum_cmd ("replay-memory-access", no_class,
2902 replay_memory_access_types, &replay_memory_access, _("\
2903 Set what memory accesses are allowed during replay."), _("\
2904 Show what memory accesses are allowed during replay."),
2905 _("Default is READ-ONLY.\n\n\
2906 The btrace record target does not trace data.\n\
2907 The memory therefore corresponds to the live target and not \
2908 to the current replay position.\n\n\
2909 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2910 When READ-WRITE, allow accesses to read-only and read-write memory during \
2911 replay."),
2912 NULL, cmd_show_replay_memory_access,
2913 &set_record_btrace_cmdlist,
2914 &show_record_btrace_cmdlist);
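/* An illustrative session for the commands registered above:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.  */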
2915
2916 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2917 _("Set record btrace bts options"),
2918 &set_record_btrace_bts_cmdlist,
2919 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2920
2921 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2922 _("Show record btrace bts options"),
2923 &show_record_btrace_bts_cmdlist,
2924 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2925
2926 add_setshow_uinteger_cmd ("buffer-size", no_class,
2927 &record_btrace_conf.bts.size,
2928 _("Set the record/replay bts buffer size."),
2929 _("Show the record/replay bts buffer size."), _("\
2930 When starting recording, request a trace buffer of this size. \
2931 The actual buffer size may differ from the requested size. \
2932 Use \"info record\" to see the actual buffer size.\n\n\
2933 Bigger buffers allow longer recording but also take more time to process \
2934 the recorded execution trace.\n\n\
2935 The trace buffer size may not be changed while recording."), NULL,
2936 show_record_bts_buffer_size_value,
2937 &set_record_btrace_bts_cmdlist,
2938 &show_record_btrace_bts_cmdlist);
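/* An illustrative session for the commands registered above:

     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace bts buffer-size
     The record/replay bts buffer size is 131072.  */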
2939
2940 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2941 _("Set record btrace pt options"),
2942 &set_record_btrace_pt_cmdlist,
2943 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2944
2945 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2946 _("Show record btrace pt options"),
2947 &show_record_btrace_pt_cmdlist,
2948 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2949
2950 add_setshow_uinteger_cmd ("buffer-size", no_class,
2951 &record_btrace_conf.pt.size,
2952 _("Set the record/replay pt buffer size."),
2953 _("Show the record/replay pt buffer size."), _("\
2954 Bigger buffers allow longer recording but also take more time to process \
2955 the recorded execution trace.\n\
2956 The actual buffer size may differ from the requested size. Use \"info record\" \
2957 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2958 &set_record_btrace_pt_cmdlist,
2959 &show_record_btrace_pt_cmdlist);
2960
2961 init_record_btrace_ops ();
2962 add_target (&record_btrace_ops);
2963
2964 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2965 xcalloc, xfree);
2966
2967 record_btrace_conf.bts.size = 64 * 1024;
2968 record_btrace_conf.pt.size = 16 * 1024;
2969 }