/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
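
/* For example, "set record btrace replay-memory-access read-write" (the
   setting named in the comment above) switches replay_memory_access to
   replay_memory_access_read_write, allowing memory writes during
   replay.  */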

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
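
/* Example: with record debugging enabled ("set debug record 1"), a call
   such as DEBUG ("open") prints "[record-btrace] open" to gdb_stdlog;
   at the default record_debug of 0 the macro expands to a no-op.  */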


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp, &record_btrace_conf);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops,
                     void (*callback) (enum inferior_event_type event_type,
                                       void *context),
                     void *context)
{
  if (callback != NULL)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, callback, context);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
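
/* For example: a size of 2048 has its low ten bits clear, so it is
   adjusted to 2 with suffix "kB", while 3000 is left unchanged with an
   empty suffix.  Larger units are checked first, so an exact 1u << 30
   yields 1 "GB" rather than 1048576 "kB".  */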

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %d (%s).\n"), insns, calls, gaps,
                     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
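
/* For a BTS overflow, for example, the function above emits a line of
   the form "[decode error (<errcode>): instruction overflow]" into the
   history listing; the numeric errcode comes straight from the
   decoder.  */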

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          /* Print the instruction index.  */
          ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
          ui_out_text (uiout, "\t");

          /* Disassembly with '/m' flag may not produce the expected result.
             See PR gdb/11833.  */
          gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
                           insn->pc + 1);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
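
/* As a worked example of the window computation above: FROM = 100 with
   SIZE = 10 requests the inclusive range [100; 109], while SIZE = -10
   requests [91; 100]; BEGIN is clamped to 0 when fewer instructions
   precede FROM, and END saturates at ULONGEST_MAX on overflow.  */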

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
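
/* For instance, a function segment that starts at instruction 10 and
   contains 5 instructions is printed as "10,14"; both ends of the range
   are inclusive.  */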

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
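
/* In practice this means that, while replaying with the default
   "replay-memory-access read-only", an attempt to modify inferior memory
   (e.g. "set var x = 1") is rejected as unavailable, whereas reads from
   SEC_READONLY sections such as .text are forwarded to the target
   beneath and succeed.  */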

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}
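
/* To illustrate the id built above: every segment of one function
   invocation is walked back to its first segment, so the special address
   is that first segment's number and the code address is the frame's
   function entry.  Distinct invocations of the same function thus still
   receive distinct frame ids.  */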

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
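
/* For example, a "reverse-stepi" arrives here with STEP == 1 and
   execution_direction == EXEC_REVERSE and is recorded as BTHR_RSTEP,
   while a plain "continue" during replay maps to BTHR_CONT.  The flag
   is consumed by record_btrace_step_thread via record_btrace_wait.  */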
1747
1748 /* Find a thread to move. */
1749
1750 static struct thread_info *
1751 record_btrace_find_thread_to_move (ptid_t ptid)
1752 {
1753 struct thread_info *tp;
1754
1755 /* First check the parameter thread. */
1756 tp = find_thread_ptid (ptid);
1757 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1758 return tp;
1759
1760 /* Otherwise, find one other thread that has been resumed. */
1761 ALL_NON_EXITED_THREADS (tp)
1762 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1763 return tp;
1764
1765 return NULL;
1766 }
1767
1768 /* Return a target_waitstatus indicating that we ran out of history. */
1769
1770 static struct target_waitstatus
1771 btrace_step_no_history (void)
1772 {
1773 struct target_waitstatus status;
1774
1775 status.kind = TARGET_WAITKIND_NO_HISTORY;
1776
1777 return status;
1778 }
1779
1780 /* Return a target_waitstatus indicating that a step finished. */
1781
1782 static struct target_waitstatus
1783 btrace_step_stopped (void)
1784 {
1785 struct target_waitstatus status;
1786
1787 status.kind = TARGET_WAITKIND_STOPPED;
1788 status.value.sig = GDB_SIGNAL_TRAP;
1789
1790 return status;
1791 }
1792
1793 /* Clear the record histories. */
1794
1795 static void
1796 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1797 {
1798 xfree (btinfo->insn_history);
1799 xfree (btinfo->call_history);
1800
1801 btinfo->insn_history = NULL;
1802 btinfo->call_history = NULL;
1803 }
1804
1805 /* Step a single thread. */
1806
1807 static struct target_waitstatus
1808 record_btrace_step_thread (struct thread_info *tp)
1809 {
1810 struct btrace_insn_iterator *replay, end;
1811 struct btrace_thread_info *btinfo;
1812 struct address_space *aspace;
1813 struct inferior *inf;
1814 enum btrace_thread_flag flags;
1815 unsigned int steps;
1816
1817 /* We can't step without an execution history. */
1818 if (btrace_is_empty (tp))
1819 return btrace_step_no_history ();
1820
1821 btinfo = &tp->btrace;
1822 replay = btinfo->replay;
1823
1824 flags = btinfo->flags & BTHR_MOVE;
1825 btinfo->flags &= ~BTHR_MOVE;
1826
1827 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1828
1829 switch (flags)
1830 {
1831 default:
1832 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1833
1834 case BTHR_STEP:
1835 /* We're done if we're not replaying. */
1836 if (replay == NULL)
1837 return btrace_step_no_history ();
1838
1839 /* Skip gaps during replay. */
1840 do
1841 {
1842 steps = btrace_insn_next (replay, 1);
1843 if (steps == 0)
1844 {
1845 record_btrace_stop_replaying (tp);
1846 return btrace_step_no_history ();
1847 }
1848 }
1849 while (btrace_insn_get (replay) == NULL);
1850
1851 /* Determine the end of the instruction trace. */
1852 btrace_insn_end (&end, btinfo);
1853
1854 /* We stop replaying if we reached the end of the trace. */
1855 if (btrace_insn_cmp (replay, &end) == 0)
1856 record_btrace_stop_replaying (tp);
1857
1858 return btrace_step_stopped ();
1859
1860 case BTHR_RSTEP:
1861 /* Start replaying if we're not already doing so. */
1862 if (replay == NULL)
1863 replay = record_btrace_start_replaying (tp);
1864
1865 /* If we can't step any further, we reached the end of the history.
1866 Skip gaps during replay. */
1867 do
1868 {
1869 steps = btrace_insn_prev (replay, 1);
1870 if (steps == 0)
1871 return btrace_step_no_history ();
1872
1873 }
1874 while (btrace_insn_get (replay) == NULL);
1875
1876 return btrace_step_stopped ();
1877
1878 case BTHR_CONT:
1879 /* We're done if we're not replaying. */
1880 if (replay == NULL)
1881 return btrace_step_no_history ();
1882
1883 inf = find_inferior_ptid (tp->ptid);
1884 aspace = inf->aspace;
1885
1886 /* Determine the end of the instruction trace. */
1887 btrace_insn_end (&end, btinfo);
1888
1889 for (;;)
1890 {
1891 const struct btrace_insn *insn;
1892
1893 /* Skip gaps during replay. */
1894 do
1895 {
1896 steps = btrace_insn_next (replay, 1);
1897 if (steps == 0)
1898 {
1899 record_btrace_stop_replaying (tp);
1900 return btrace_step_no_history ();
1901 }
1902
1903 insn = btrace_insn_get (replay);
1904 }
1905 while (insn == NULL);
1906
1907 /* We stop replaying if we reached the end of the trace. */
1908 if (btrace_insn_cmp (replay, &end) == 0)
1909 {
1910 record_btrace_stop_replaying (tp);
1911 return btrace_step_no_history ();
1912 }
1913
1914 DEBUG ("stepping %d (%s) ... %s", tp->num,
1915 target_pid_to_str (tp->ptid),
1916 core_addr_to_string_nz (insn->pc));
1917
1918 if (breakpoint_here_p (aspace, insn->pc))
1919 return btrace_step_stopped ();
1920 }
1921
1922 case BTHR_RCONT:
1923 /* Start replaying if we're not already doing so. */
1924 if (replay == NULL)
1925 replay = record_btrace_start_replaying (tp);
1926
1927 inf = find_inferior_ptid (tp->ptid);
1928 aspace = inf->aspace;
1929
1930 for (;;)
1931 {
1932 const struct btrace_insn *insn;
1933
1934 /* If we can't step any further, we reached the end of the history.
1935 Skip gaps during replay. */
1936 do
1937 {
1938 steps = btrace_insn_prev (replay, 1);
1939 if (steps == 0)
1940 return btrace_step_no_history ();
1941
1942 insn = btrace_insn_get (replay);
1943 }
1944 while (insn == NULL);
1945
1946 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1947 target_pid_to_str (tp->ptid),
1948 core_addr_to_string_nz (insn->pc));
1949
1950 if (breakpoint_here_p (aspace, insn->pc))
1951 return btrace_step_stopped ();
1952 }
1953 }
1954 }
1955
1956 /* The to_wait method of target record-btrace. */
1957
1958 static ptid_t
1959 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1960 struct target_waitstatus *status, int options)
1961 {
1962 struct thread_info *tp, *other;
1963
1964 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1965
1966 /* As long as we're not replaying, just forward the request. */
1967 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1968 {
1969 ops = ops->beneath;
1970 return ops->to_wait (ops, ptid, status, options);
1971 }
1972
1973 /* Let's find a thread to move. */
1974 tp = record_btrace_find_thread_to_move (ptid);
1975 if (tp == NULL)
1976 {
1977 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1978
1979 status->kind = TARGET_WAITKIND_IGNORE;
1980 return minus_one_ptid;
1981 }
1982
1983 /* We only move a single thread. We're not able to correlate threads. */
1984 *status = record_btrace_step_thread (tp);
1985
1986 /* Stop all other threads. */
1987 if (!non_stop)
1988 ALL_NON_EXITED_THREADS (other)
1989 other->btrace.flags &= ~BTHR_MOVE;
1990
1991 /* Start record histories anew from the current position. */
1992 record_btrace_clear_histories (&tp->btrace);
1993
1994 /* We moved the replay position but did not update registers. */
1995 registers_changed_ptid (tp->ptid);
1996
1997 return tp->ptid;
1998 }
1999
2000 /* The to_can_execute_reverse method of target record-btrace. */
2001
2002 static int
2003 record_btrace_can_execute_reverse (struct target_ops *self)
2004 {
2005 return 1;
2006 }
2007
2008 /* The to_decr_pc_after_break method of target record-btrace. */
2009
2010 static CORE_ADDR
2011 record_btrace_decr_pc_after_break (struct target_ops *ops,
2012 struct gdbarch *gdbarch)
2013 {
2014 /* When replaying, we do not actually execute the breakpoint instruction
2015 so there is no need to adjust the PC after hitting a breakpoint. */
2016 if (record_btrace_is_replaying (ops))
2017 return 0;
2018
2019 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
2020 }
2021
2022 /* The to_update_thread_list method of target record-btrace. */
2023
2024 static void
2025 record_btrace_update_thread_list (struct target_ops *ops)
2026 {
2027 /* We don't add or remove threads during replay. */
2028 if (record_btrace_is_replaying (ops))
2029 return;
2030
2031 /* Forward the request. */
2032 ops = ops->beneath;
2033 ops->to_update_thread_list (ops);
2034 }
2035
2036 /* The to_thread_alive method of target record-btrace. */
2037
2038 static int
2039 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2040 {
2041 /* We don't add or remove threads during replay. */
2042 if (record_btrace_is_replaying (ops))
2043 return find_thread_ptid (ptid) != NULL;
2044
2045 /* Forward the request. */
2046 ops = ops->beneath;
2047 return ops->to_thread_alive (ops, ptid);
2048 }
2049
2050 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2051 is stopped. */
2052
2053 static void
2054 record_btrace_set_replay (struct thread_info *tp,
2055 const struct btrace_insn_iterator *it)
2056 {
2057 struct btrace_thread_info *btinfo;
2058
2059 btinfo = &tp->btrace;
2060
2061 if (it == NULL || it->function == NULL)
2062 record_btrace_stop_replaying (tp);
2063 else
2064 {
2065 if (btinfo->replay == NULL)
2066 record_btrace_start_replaying (tp);
2067 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2068 return;
2069
2070 *btinfo->replay = *it;
2071 registers_changed_ptid (tp->ptid);
2072 }
2073
2074 /* Start anew from the new replay position. */
2075 record_btrace_clear_histories (btinfo);
2076 }
2077
2078 /* The to_goto_record_begin method of target record-btrace. */
2079
2080 static void
2081 record_btrace_goto_begin (struct target_ops *self)
2082 {
2083 struct thread_info *tp;
2084 struct btrace_insn_iterator begin;
2085
2086 tp = require_btrace_thread ();
2087
2088 btrace_insn_begin (&begin, &tp->btrace);
2089 record_btrace_set_replay (tp, &begin);
2090
2091 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2092 }
2093
2094 /* The to_goto_record_end method of target record-btrace. */
2095
2096 static void
2097 record_btrace_goto_end (struct target_ops *ops)
2098 {
2099 struct thread_info *tp;
2100
2101 tp = require_btrace_thread ();
2102
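  /* Passing a NULL iterator stops replaying; see record_btrace_set_replay.  */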
2103 record_btrace_set_replay (tp, NULL);
2104
2105 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2106 }
2107
2108 /* The to_goto_record method of target record-btrace. */
2109
2110 static void
2111 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2112 {
2113 struct thread_info *tp;
2114 struct btrace_insn_iterator it;
2115 unsigned int number;
2116 int found;
2117
2118 number = insn;
2119
2120 /* Check for wrap-arounds: INSN is a ULONGEST, but we only support
2121 unsigned int instruction numbers. If the conversion truncated the
2122 value, it is out of range. */
2121 if (number != insn)
2122 error (_("Instruction number out of range."));
2123
2124 tp = require_btrace_thread ();
2125
2126 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2127 if (found == 0)
2128 error (_("No such instruction."));
2129
2130 record_btrace_set_replay (tp, &it);
2131
2132 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2133 }
2134
2135 /* The to_execution_direction target method. */
2136
2137 static enum exec_direction_kind
2138 record_btrace_execution_direction (struct target_ops *self)
2139 {
2140 return record_btrace_resume_exec_dir;
2141 }
2142
2143 /* The to_prepare_to_generate_core target method. */
2144
2145 static void
2146 record_btrace_prepare_to_generate_core (struct target_ops *self)
2147 {
2148 record_btrace_generating_corefile = 1;
2149 }
2150
2151 /* The to_done_generating_core target method. */
2152
2153 static void
2154 record_btrace_done_generating_core (struct target_ops *self)
2155 {
2156 record_btrace_generating_corefile = 0;
2157 }
2158
2159 /* Initialize the record-btrace target ops. */
2160
2161 static void
2162 init_record_btrace_ops (void)
2163 {
2164 struct target_ops *ops;
2165
2166 ops = &record_btrace_ops;
2167 ops->to_shortname = "record-btrace";
2168 ops->to_longname = "Branch tracing target";
2169 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2170 ops->to_open = record_btrace_open;
2171 ops->to_close = record_btrace_close;
2172 ops->to_async = record_btrace_async;
2173 ops->to_detach = record_detach;
2174 ops->to_disconnect = record_disconnect;
2175 ops->to_mourn_inferior = record_mourn_inferior;
2176 ops->to_kill = record_kill;
2177 ops->to_stop_recording = record_btrace_stop_recording;
2178 ops->to_info_record = record_btrace_info;
2179 ops->to_insn_history = record_btrace_insn_history;
2180 ops->to_insn_history_from = record_btrace_insn_history_from;
2181 ops->to_insn_history_range = record_btrace_insn_history_range;
2182 ops->to_call_history = record_btrace_call_history;
2183 ops->to_call_history_from = record_btrace_call_history_from;
2184 ops->to_call_history_range = record_btrace_call_history_range;
2185 ops->to_record_is_replaying = record_btrace_is_replaying;
2186 ops->to_xfer_partial = record_btrace_xfer_partial;
2187 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2188 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2189 ops->to_fetch_registers = record_btrace_fetch_registers;
2190 ops->to_store_registers = record_btrace_store_registers;
2191 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2192 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2193 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2194 ops->to_resume = record_btrace_resume;
2195 ops->to_wait = record_btrace_wait;
2196 ops->to_update_thread_list = record_btrace_update_thread_list;
2197 ops->to_thread_alive = record_btrace_thread_alive;
2198 ops->to_goto_record_begin = record_btrace_goto_begin;
2199 ops->to_goto_record_end = record_btrace_goto_end;
2200 ops->to_goto_record = record_btrace_goto;
2201 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2202 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
2203 ops->to_execution_direction = record_btrace_execution_direction;
2204 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2205 ops->to_done_generating_core = record_btrace_done_generating_core;
2206 ops->to_stratum = record_stratum;
2207 ops->to_magic = OPS_MAGIC;
2208 }
2209
2210 /* Start recording in BTS format. Implements the "record btrace bts" command. */
2211
2212 static void
2213 cmd_record_btrace_bts_start (char *args, int from_tty)
2214 {
2215 volatile struct gdb_exception exception;
2216
2217 if (args != NULL && *args != 0)
2218 error (_("Invalid argument."));
2219
2220 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2221
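  /* Push the record-btrace target; undo the format selection if that fails.  */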
2222 TRY_CATCH (exception, RETURN_MASK_ALL)
2223 execute_command ("target record-btrace", from_tty);
2224
2225 if (exception.error != 0)
2226 {
2227 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2228 throw_exception (exception);
2229 }
2230 }
2231
2232 /* The "record btrace" command; starts recording in BTS format by
2233 invoking "target record-btrace". */
2233
2234 static void
2235 cmd_record_btrace_start (char *args, int from_tty)
2236 {
2237 volatile struct gdb_exception exception;
2238
2239 if (args != NULL && *args != 0)
2240 error (_("Invalid argument."));
2241
2242 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2243
2244 TRY_CATCH (exception, RETURN_MASK_ALL)
2245 execute_command ("target record-btrace", from_tty);
2246
2247 if (exception.error == 0)
2248 return;
2249
2250 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2251 throw_exception (exception);
2252 }
2253
2254 /* The "set record btrace" command. */
2255
2256 static void
2257 cmd_set_record_btrace (char *args, int from_tty)
2258 {
2259 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2260 }
2261
2262 /* The "show record btrace" command. */
2263
2264 static void
2265 cmd_show_record_btrace (char *args, int from_tty)
2266 {
2267 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2268 }
2269
2270 /* The "show record btrace replay-memory-access" command. */
2271
2272 static void
2273 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2274 struct cmd_list_element *c, const char *value)
2275 {
2276 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2277 replay_memory_access);
2278 }
2279
2280 /* The "set record btrace bts" command. */
2281
2282 static void
2283 cmd_set_record_btrace_bts (char *args, int from_tty)
2284 {
2285 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2286 "by an apporpriate subcommand.\n"));
2287 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2288 all_commands, gdb_stdout);
2289 }
2290
2291 /* The "show record btrace bts" command. */
2292
2293 static void
2294 cmd_show_record_btrace_bts (char *args, int from_tty)
2295 {
2296 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2297 }
2298
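/* Provide a prototype to silence -Wmissing-prototypes.  */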
2299 void _initialize_record_btrace (void);
2300
2301 /* Initialize btrace commands. */
2302
2303 void
2304 _initialize_record_btrace (void)
2305 {
2306 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2307 _("Start branch trace recording."), &record_btrace_cmdlist,
2308 "record btrace ", 0, &record_cmdlist);
2309 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2310
2311 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2312 _("\
2313 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2314 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2315 This format may not be available on all processors."),
2316 &record_btrace_cmdlist);
2317 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2318
2319 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2320 _("Set record options"), &set_record_btrace_cmdlist,
2321 "set record btrace ", 0, &set_record_cmdlist);
2322
2323 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2324 _("Show record options"), &show_record_btrace_cmdlist,
2325 "show record btrace ", 0, &show_record_cmdlist);
2326
2327 add_setshow_enum_cmd ("replay-memory-access", no_class,
2328 replay_memory_access_types, &replay_memory_access, _("\
2329 Set what memory accesses are allowed during replay."), _("\
2330 Show what memory accesses are allowed during replay."),
2331 _("Default is READ-ONLY.\n\n\
2332 The btrace record target does not trace data.\n\
2333 The memory therefore corresponds to the live target and not \
2334 to the current replay position.\n\n\
2335 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2336 When READ-WRITE, allow accesses to read-only and read-write memory during \
2337 replay."),
2338 NULL, cmd_show_replay_memory_access,
2339 &set_record_btrace_cmdlist,
2340 &show_record_btrace_cmdlist);
2341
2342 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2343 _("Set record btrace bts options"),
2344 &set_record_btrace_bts_cmdlist,
2345 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2346
2347 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2348 _("Show record btrace bts options"),
2349 &show_record_btrace_bts_cmdlist,
2350 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2351
2352 add_setshow_uinteger_cmd ("buffer-size", no_class,
2353 &record_btrace_conf.bts.size,
2354 _("Set the record/replay bts buffer size."),
2355 _("Show the record/replay bts buffer size."), _("\
2356 When recording is started, a trace buffer of this size is requested. \
2357 The actual buffer size may differ from the requested size. \
2358 Use \"info record\" to see the actual buffer size.\n\n\
2359 Bigger buffers allow longer recording but also take more time to process \
2360 the recorded execution trace.\n\n\
2361 The trace buffer size may not be changed while recording."), NULL, NULL,
2362 &set_record_btrace_bts_cmdlist,
2363 &show_record_btrace_bts_cmdlist);
2364
2365 init_record_btrace_ops ();
2366 add_target (&record_btrace_ops);
2367
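  /* Allocate the frame cache used by the record-btrace frame unwinder.  */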
2368 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2369 xcalloc, xfree);
2370
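  /* Default to a 64 KB BTS buffer; use "set record btrace bts buffer-size"
     to change it.  */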
2371 record_btrace_conf.bts.size = 64 * 1024;
2372 }