Put GDB's terminal settings into effect when paginating
binutils-gdb.git: gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
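
/* The access type above is controlled from the GDB CLI via the commands
   registered in _initialize_record_btrace at the bottom of this file:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   The default is read-only.  */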

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
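
/* For example (assuming record_debug is raised with "set debug record 1",
   as for GDB's other record targets),

     DEBUG ("resume %s", target_pid_to_str (ptid));

   prints a line like "[record-btrace] resume Thread ..." to gdb_stdlog,
   and expands to a no-op when record_debug is zero.  */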


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  discard_cleanups (disable_chain);
}
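
/* This open routine runs when the target is pushed, i.e. on

     (gdb) target record-btrace

   optionally with a thread number list as argument, or via the
   argument-less "record btrace" alias (see cmd_record_btrace_start
   below).  On success, branch tracing is enabled for the matching
   threads; on failure, the cleanup chain disables it again.  */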

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
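
/* For example, a request with FROM = 10 and SIZE = -5 yields the range
   [6; 10], while FROM = 10 and SIZE = 5 yields [10; 14]; both ranges are
   inclusive, matching the inclusive semantics of the range method above.  */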

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
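
/* For example, a function segment starting at instruction number 10 and
   containing three instructions prints as "10,12".  */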

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
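
/* The begin/end computation mirrors record_btrace_insn_history_from above;
   e.g. FROM = 10 with SIZE = -5 covers calls [6; 10], and FROM = 10 with
   SIZE = 5 covers calls [10; 14].  */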

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_xfer_partial != NULL)
      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				   offset, len, xfered_len);

  *xfered_len = len;
  return TARGET_XFER_UNAVAILABLE;
}
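
/* The net effect while replaying with the default read-only setting: reads
   from read-only sections (e.g. .text, for illustration) are forwarded to
   the target beneath, while writes and reads of writable memory report
   TARGET_XFER_UNAVAILABLE, since the recorded branch trace contains no data
   values that could be restored for the replay position.  */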

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	  {
	    t->to_fetch_registers (t, regcache, regno);
	    break;
	  }
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
      {
	t->to_store_registers (t, regcache, regno);
	return;
      }

  noprocess ();
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
      {
	t->to_prepare_to_store (t, regcache);
	return;
      }
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
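
/* An illustration of the two cases above: in the common case, the caller
   segment's last executed instruction is the call, so if that is a 5-byte
   call at 0x4004f0, the unwound PC is 0x4004f5, i.e. the return address.
   When the up link stems from a return (BFUN_UP_LINKS_TO_RET), the caller
   segment begins with the instruction we return to, so its first
   instruction's PC is used directly.  */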

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement dealloc_cache method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
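
/* To summarize the flag handling above: BTHR_STEP and BTHR_RSTEP move the
   replay iterator by a single instruction (forward resp. backward) and
   report a SIGTRAP stop; BTHR_CONT and BTHR_RCONT keep moving until a
   breakpoint is hit or the history runs out, in which case we report
   TARGET_WAITKIND_NO_HISTORY.  */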

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}

/* The to_find_new_threads method of target record-btrace.  */

static void
record_btrace_find_new_threads (struct target_ops *ops)
{
  /* Don't expect new threads if we're replaying.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_find_new_threads != NULL)
      {
	ops->to_find_new_threads (ops);
	break;
      }
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, ptid);

  return 0;
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
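
/* These three methods back the "record goto" family of CLI commands,
   spelled (in the GDB command syntax assumed here) as

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end

   where the numeric argument is an instruction number as shown by
   "record instruction-history".  */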

/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Alias for "target record-btrace".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}

/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
		    replay_memory_access);
}

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			_("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}