record btrace: add configuration struct
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above, not by string contents.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, replay-mode restrictions on memory/register access are lifted.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  Passed to btrace_enable for
   each newly traced thread.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
91
92/* Update the branch trace for the current thread and return a pointer to its
066ce621 93 thread_info.
afedecd3
MM
94
95 Throws an error if there is no thread or no trace. This function never
96 returns NULL. */
97
066ce621
MM
98static struct thread_info *
99require_btrace_thread (void)
afedecd3
MM
100{
101 struct thread_info *tp;
afedecd3
MM
102
103 DEBUG ("require");
104
105 tp = find_thread_ptid (inferior_ptid);
106 if (tp == NULL)
107 error (_("No thread."));
108
109 btrace_fetch (tp);
110
6e07b1d2 111 if (btrace_is_empty (tp))
afedecd3
MM
112 error (_("No trace."));
113
066ce621
MM
114 return tp;
115}
116
117/* Update the branch trace for the current thread and return a pointer to its
118 branch trace information struct.
119
120 Throws an error if there is no thread or no trace. This function never
121 returns NULL. */
122
123static struct btrace_thread_info *
124require_btrace (void)
125{
126 struct thread_info *tp;
127
128 tp = require_btrace_thread ();
129
130 return &tp->btrace;
afedecd3
MM
131}
132
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; TP is the thread to trace.
   Errors thrown by btrace_enable are downgraded to warnings so that
   thread creation itself is never aborted.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp, &record_btrace_conf);

  if (error.message != NULL)
    warning ("%s", error.message);
}
146
/* Callback function to disable branch tracing for one thread.
   ARG is the struct thread_info to stop tracing (cleanup callback
   signature requires void *).  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
158
159/* Enable automatic tracing of new threads. */
160
161static void
162record_btrace_auto_enable (void)
163{
164 DEBUG ("attach thread observer");
165
166 record_btrace_thread_observer
167 = observer_attach_new_thread (record_btrace_enable_warn);
168}
169
170/* Disable automatic tracing of new threads. */
171
172static void
173record_btrace_auto_disable (void)
174{
175 /* The observer may have been detached, already. */
176 if (record_btrace_thread_observer == NULL)
177 return;
178
179 DEBUG ("detach thread observer");
180
181 observer_detach_new_thread (record_btrace_thread_observer);
182 record_btrace_thread_observer = NULL;
183}
184
70ad5bff
MM
/* The record-btrace async event handler function.  Simply forwards to
   the generic inferior event handler; DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
192
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS is an optional thread-number list restricting which threads get
   traced; FROM_TTY is unused here.  Errors out if there is no running
   inferior or if we are in non-stop mode.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Collect a cleanup per successfully enabled thread so that a later
     failure disables them all again; discarded on full success.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  discard_cleanups (disable_chain);
}
235
/* The to_stop_recording method of target record-btrace.

   Stops tracing new threads and disables btrace on every live thread
   that is currently being traced (tp->btrace.target != NULL).  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
251
/* The to_close method of target record-btrace.

   Releases the async event handler and tears down per-thread btrace
   state, even if recording was never explicitly stopped.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
271
b7d2e916
PA
272/* The to_async method of target record-btrace. */
273
274static void
275record_btrace_async (struct target_ops *ops,
276 void (*callback) (enum inferior_event_type event_type,
277 void *context),
278 void *context)
279{
280 if (callback != NULL)
281 mark_async_event_handler (record_btrace_async_inferior_event_handler);
282 else
283 clear_async_event_handler (record_btrace_async_inferior_event_handler);
284
285 ops->beneath->to_async (ops->beneath, callback, context);
286}
287
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the recording format, the number of recorded instructions and
   function segments for the current thread, and the replay position if
   replaying.  Errors out when there is no current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    printf_unfiltered (_("Recording format: %s.\n"),
		       btrace_format_string (conf->format));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The last call/insn number equals the total count since numbering
	 starts at one; step back from the end iterator to reach it.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
338
/* Print an unsigned int VAL into field FLD of UIOUT.  Convenience
   wrapper since ui-out has no native unsigned field printer.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
346
/* Disassemble a section of the recorded instruction trace.

   Prints, to UIOUT, each instruction in [BEGIN; END) as its trace index
   followed by its disassembly.  FLAGS are disassembly flags passed
   through to gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
377
/* The to_insn_history method of target record-btrace.

   Shows abs (SIZE) instructions; SIZE < 0 means backwards.  On the first
   request the window is anchored at the replay position (or trace tail);
   subsequent requests continue from the previously shown window, which is
   remembered via btrace_set_insn_history.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the last shown window, extending in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
463
/* The to_insn_history_range method of target record-btrace.

   Shows the instructions numbered [FROM; TO], both inclusive.  Errors
   out on wrap-around, an inverted range, or a FROM outside the trace;
   a TO past the end is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int while
     FROM/TO are ULONGEST; a narrowing change means overflow.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
516
/* The to_insn_history_from method of target record-btrace.

   Shows abs (SIZE) instructions ending at (SIZE < 0) or starting at
   (SIZE > 0) instruction number FROM, clamping at 0 and ULONGEST_MAX.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping below 0.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
550
/* Print the instruction number range for a function call history line.

   BFUN must contain at least one instruction; prints the inclusive
   range "begin,end" into UIOUT.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
569
/* Print the source line information for a function call history line.

   Prints "file:min[,max]" into UIOUT; prints nothing when BFUN has no
   symbol or an empty (end < begin) line range, and omits the max line
   when the function spans a single line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
601
0b722aec
MM
602/* Get the name of a branch trace function. */
603
604static const char *
605btrace_get_bfun_name (const struct btrace_function *bfun)
606{
607 struct minimal_symbol *msym;
608 struct symbol *sym;
609
610 if (bfun == NULL)
611 return "??";
612
613 msym = bfun->msym;
614 sym = bfun->sym;
615
616 if (sym != NULL)
617 return SYMBOL_PRINT_NAME (sym);
618 else if (msym != NULL)
efd66ac6 619 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
620 else
621 return "??";
622}
623
afedecd3
MM
624/* Disassemble a section of the recorded function trace. */
625
626static void
23a7fe75 627btrace_call_history (struct ui_out *uiout,
8710b709 628 const struct btrace_thread_info *btinfo,
23a7fe75
MM
629 const struct btrace_call_iterator *begin,
630 const struct btrace_call_iterator *end,
afedecd3
MM
631 enum record_print_flag flags)
632{
23a7fe75 633 struct btrace_call_iterator it;
afedecd3 634
23a7fe75
MM
635 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
636 btrace_call_number (end));
afedecd3 637
23a7fe75 638 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 639 {
23a7fe75
MM
640 const struct btrace_function *bfun;
641 struct minimal_symbol *msym;
642 struct symbol *sym;
643
644 bfun = btrace_call_get (&it);
23a7fe75 645 sym = bfun->sym;
0b722aec 646 msym = bfun->msym;
23a7fe75 647
afedecd3 648 /* Print the function index. */
23a7fe75 649 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
650 ui_out_text (uiout, "\t");
651
8710b709
MM
652 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
653 {
654 int level = bfun->level + btinfo->level, i;
655
656 for (i = 0; i < level; ++i)
657 ui_out_text (uiout, " ");
658 }
659
660 if (sym != NULL)
661 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
662 else if (msym != NULL)
efd66ac6 663 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
664 else if (!ui_out_is_mi_like_p (uiout))
665 ui_out_field_string (uiout, "function", "??");
666
1e038f67 667 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 668 {
8710b709 669 ui_out_text (uiout, _("\tinst "));
23a7fe75 670 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
671 }
672
1e038f67 673 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 674 {
8710b709 675 ui_out_text (uiout, _("\tat "));
23a7fe75 676 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
677 }
678
afedecd3
MM
679 ui_out_text (uiout, "\n");
680 }
681}
682
/* The to_call_history method of target record-btrace.

   Shows abs (SIZE) function segments; SIZE < 0 means backwards.  On the
   first request the window is anchored at the replay position (or trace
   tail); subsequent requests continue from the previously shown window,
   remembered via btrace_set_call_history.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): tuple id "insn history" looks copy-pasted from the
     instruction-history code; the range variant below uses "func
     history".  Confirm whether any MI consumer depends on this id
     before changing it.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the last shown window, extending in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
771
/* The to_call_history_range method of target record-btrace.

   Shows the function segments numbered [FROM; TO], both inclusive.
   Errors out on wrap-around, an inverted range, or a FROM outside the
   trace; a TO past the end is silently truncated.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int while
     FROM/TO are ULONGEST; a narrowing change means overflow.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
824
/* The to_call_history_from method of target record-btrace.

   Shows abs (SIZE) function segments ending at (SIZE < 0) or starting
   at (SIZE > 0) call number FROM, clamping at 0 and ULONGEST_MAX.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping below 0.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
858
07bbe694
MM
859/* The to_record_is_replaying method of target record-btrace. */
860
861static int
1c63c994 862record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
863{
864 struct thread_info *tp;
865
034f788c 866 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
867 if (btrace_is_replaying (tp))
868 return 1;
869
870 return 0;
871}
872
633785ff
MM
873/* The to_xfer_partial method of target record-btrace. */
874
9b409511 875static enum target_xfer_status
633785ff
MM
876record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
877 const char *annex, gdb_byte *readbuf,
878 const gdb_byte *writebuf, ULONGEST offset,
9b409511 879 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
880{
881 struct target_ops *t;
882
883 /* Filter out requests that don't make sense during replay. */
67b5c0c1 884 if (replay_memory_access == replay_memory_access_read_only
aef92902 885 && !record_btrace_generating_corefile
67b5c0c1 886 && record_btrace_is_replaying (ops))
633785ff
MM
887 {
888 switch (object)
889 {
890 case TARGET_OBJECT_MEMORY:
891 {
892 struct target_section *section;
893
894 /* We do not allow writing memory in general. */
895 if (writebuf != NULL)
9b409511
YQ
896 {
897 *xfered_len = len;
bc113b4e 898 return TARGET_XFER_UNAVAILABLE;
9b409511 899 }
633785ff
MM
900
901 /* We allow reading readonly memory. */
902 section = target_section_by_addr (ops, offset);
903 if (section != NULL)
904 {
905 /* Check if the section we found is readonly. */
906 if ((bfd_get_section_flags (section->the_bfd_section->owner,
907 section->the_bfd_section)
908 & SEC_READONLY) != 0)
909 {
910 /* Truncate the request to fit into this section. */
911 len = min (len, section->endaddr - offset);
912 break;
913 }
914 }
915
9b409511 916 *xfered_len = len;
bc113b4e 917 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
918 }
919 }
920 }
921
922 /* Forward the request. */
e75fdfca
TT
923 ops = ops->beneath;
924 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
925 offset, len, xfered_len);
633785ff
MM
926}
927
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily switches replay memory access to read-write so the
   breakpoint can be written, forwards to the target beneath, then
   restores the previous access mode before re-throwing any error.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
955
/* The to_remove_breakpoint method of target record-btrace.

   Temporarily switches replay memory access to read-write so the
   original bytes can be restored, forwards to the target beneath, then
   restores the previous access mode before re-throwing any error.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
983
1f3ef581
MM
984/* The to_fetch_registers method of target record-btrace. */
985
986static void
987record_btrace_fetch_registers (struct target_ops *ops,
988 struct regcache *regcache, int regno)
989{
990 struct btrace_insn_iterator *replay;
991 struct thread_info *tp;
992
993 tp = find_thread_ptid (inferior_ptid);
994 gdb_assert (tp != NULL);
995
996 replay = tp->btrace.replay;
aef92902 997 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
998 {
999 const struct btrace_insn *insn;
1000 struct gdbarch *gdbarch;
1001 int pcreg;
1002
1003 gdbarch = get_regcache_arch (regcache);
1004 pcreg = gdbarch_pc_regnum (gdbarch);
1005 if (pcreg < 0)
1006 return;
1007
1008 /* We can only provide the PC register. */
1009 if (regno >= 0 && regno != pcreg)
1010 return;
1011
1012 insn = btrace_insn_get (replay);
1013 gdb_assert (insn != NULL);
1014
1015 regcache_raw_supply (regcache, regno, &insn->pc);
1016 }
1017 else
1018 {
e75fdfca 1019 struct target_ops *t = ops->beneath;
1f3ef581 1020
e75fdfca 1021 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1022 }
1023}
1024
1025/* The to_store_registers method of target record-btrace. */
1026
1027static void
1028record_btrace_store_registers (struct target_ops *ops,
1029 struct regcache *regcache, int regno)
1030{
1031 struct target_ops *t;
1032
aef92902 1033 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1034 error (_("This record target does not allow writing registers."));
1035
1036 gdb_assert (may_write_registers != 0);
1037
e75fdfca
TT
1038 t = ops->beneath;
1039 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1040}
1041
1042/* The to_prepare_to_store method of target record-btrace. */
1043
1044static void
1045record_btrace_prepare_to_store (struct target_ops *ops,
1046 struct regcache *regcache)
1047{
1048 struct target_ops *t;
1049
aef92902 1050 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1051 return;
1052
e75fdfca
TT
1053 t = ops->beneath;
1054 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1055}
1056
0b722aec
MM
1057/* The branch trace frame cache. */
1058
1059struct btrace_frame_cache
1060{
1061 /* The thread. */
1062 struct thread_info *tp;
1063
1064 /* The frame info. */
1065 struct frame_info *frame;
1066
1067 /* The branch trace function segment. */
1068 const struct btrace_function *bfun;
1069};
1070
1071/* A struct btrace_frame_cache hash table indexed by NEXT. */
1072
1073static htab_t bfcache;
1074
1075/* hash_f for htab_create_alloc of bfcache. */
1076
1077static hashval_t
1078bfcache_hash (const void *arg)
1079{
1080 const struct btrace_frame_cache *cache = arg;
1081
1082 return htab_hash_pointer (cache->frame);
1083}
1084
1085/* eq_f for htab_create_alloc of bfcache. */
1086
1087static int
1088bfcache_eq (const void *arg1, const void *arg2)
1089{
1090 const struct btrace_frame_cache *cache1 = arg1;
1091 const struct btrace_frame_cache *cache2 = arg2;
1092
1093 return cache1->frame == cache2->frame;
1094}
1095
/* Create a new btrace frame cache for FRAME and register it in bfcache.
   The cache entry is allocated on the frame obstack and thus released
   together with the frame.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* FRAME must not already have a cache entry.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1113
1114/* Extract the branch trace function from a branch trace frame. */
1115
1116static const struct btrace_function *
1117btrace_get_frame_function (struct frame_info *frame)
1118{
1119 const struct btrace_frame_cache *cache;
1120 const struct btrace_function *bfun;
1121 struct btrace_frame_cache pattern;
1122 void **slot;
1123
1124 pattern.frame = frame;
1125
1126 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1127 if (slot == NULL)
1128 return NULL;
1129
1130 cache = *slot;
1131 return cache->bfun;
1132}
1133
cecac1ab
MM
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Without a caller segment in the recorded trace we cannot unwind any
     further.  */
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1152
/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function invocation so the id
     stays stable across all its segments.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  /* The segment number disambiguates different invocations of the same
     function (e.g. recursion).  */
  special = bfun->number;

  /* The stack itself is not available, so build an id from the function
     start address and the segment number.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1181
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the caller's
   branch trace segment.  All other registers are unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link was created by a return: the caller resumes at the
	 first instruction of its segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link was created by a call: the return address is the
	 instruction following the call at the end of the caller's
	 segment.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1230
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims a frame if it can be mapped to a branch trace function segment:
   the innermost frame maps to the current replay position; outer frames
   map to the caller of the already-claimed frame below them.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: take the caller of the next (inner) frame's segment,
	 unless the callee was entered via a tail call - that case is
	 handled by the tail-call sniffer.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1280
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail-call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  /* Only claim the frame if the inner segment was entered via a tail
     call.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1318
/* Implement dealloc_cache method for record_btrace_frame_unwind.
   Remove the frame's entry from bfcache when the frame is destroyed; the
   cache memory itself lives on the frame obstack.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must have been registered by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1332
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1349
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call (see record_btrace_tailcall_frame_sniffer).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1360
ac01945b
TT
1361/* Implement the to_get_unwinder method. */
1362
1363static const struct frame_unwind *
1364record_btrace_to_get_unwinder (struct target_ops *self)
1365{
1366 return &record_btrace_frame_unwind;
1367}
1368
1369/* Implement the to_get_tailcall_unwinder method. */
1370
1371static const struct frame_unwind *
1372record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1373{
1374 return &record_btrace_tailcall_frame_unwind;
1375}
1376
52834460
MM
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent; the actual move happens in
   record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread must not be resumed again before it has been moved.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1397
1398/* Find the thread to resume given a PTID. */
1399
1400static struct thread_info *
1401record_btrace_find_resume_thread (ptid_t ptid)
1402{
1403 struct thread_info *tp;
1404
1405 /* When asked to resume everything, we pick the current thread. */
1406 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1407 ptid = inferior_ptid;
1408
1409 return find_thread_ptid (ptid);
1410}
1411
/* Start replaying a thread.  Returns the new replay iterator, or NULL if
   TP has no recorded trace.  On error, the replay state is rolled back
   and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, roll back the replay state and re-throw.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1498
1499/* Stop replaying a thread. */
1500
1501static void
1502record_btrace_stop_replaying (struct thread_info *tp)
1503{
1504 struct btrace_thread_info *btinfo;
1505
1506 btinfo = &tp->btrace;
1507
1508 xfree (btinfo->replay);
1509 btinfo->replay = NULL;
1510
1511 /* Make sure we're not leaving any stale registers. */
1512 registers_changed_ptid (tp->ptid);
1513}
1514
b2f4cfde
MM
/* The to_resume method of target record-btrace.

   While replaying (or resuming in reverse), only records the move intent
   for a single thread; the actual stepping happens in record_btrace_wait.
   Otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1568
1569/* Find a thread to move. */
1570
1571static struct thread_info *
1572record_btrace_find_thread_to_move (ptid_t ptid)
1573{
1574 struct thread_info *tp;
1575
1576 /* First check the parameter thread. */
1577 tp = find_thread_ptid (ptid);
1578 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1579 return tp;
1580
1581 /* Otherwise, find one other thread that has been resumed. */
034f788c 1582 ALL_NON_EXITED_THREADS (tp)
52834460
MM
1583 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1584 return tp;
1585
1586 return NULL;
1587}
1588
1589/* Return a target_waitstatus indicating that we ran out of history. */
1590
1591static struct target_waitstatus
1592btrace_step_no_history (void)
1593{
1594 struct target_waitstatus status;
1595
1596 status.kind = TARGET_WAITKIND_NO_HISTORY;
1597
1598 return status;
1599}
1600
1601/* Return a target_waitstatus indicating that a step finished. */
1602
1603static struct target_waitstatus
1604btrace_step_stopped (void)
1605{
1606 struct target_waitstatus status;
1607
1608 status.kind = TARGET_WAITKIND_STOPPED;
1609 status.value.sig = GDB_SIGNAL_TRAP;
1610
1611 return status;
1612}
1613
1614/* Clear the record histories. */
1615
1616static void
1617record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1618{
1619 xfree (btinfo->insn_history);
1620 xfree (btinfo->call_history);
1621
1622 btinfo->insn_history = NULL;
1623 btinfo->call_history = NULL;
1624}
1625
/* Step a single thread.  Consumes TP's BTHR_MOVE request and performs the
   requested movement on the recorded instruction trace.  Returns the
   resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so the thread is not stepped again.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or the end of the trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or the start of the
	 trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1751
/* The to_wait method of target record-btrace.

   Performs the movement requested by record_btrace_resume on a single
   thread and reports the resulting stop.  Forwards to the target beneath
   when not replaying.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1795
/* The to_can_execute_reverse method of target record-btrace.  Replaying
   recorded branch trace always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1803
1804/* The to_decr_pc_after_break method of target record-btrace. */
1805
1806static CORE_ADDR
1807record_btrace_decr_pc_after_break (struct target_ops *ops,
1808 struct gdbarch *gdbarch)
1809{
1810 /* When replaying, we do not actually execute the breakpoint instruction
1811 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1812 if (record_btrace_is_replaying (ops))
52834460
MM
1813 return 0;
1814
c0eca49f 1815 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1816}
1817
e8032dde 1818/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
1819
1820static void
e8032dde 1821record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 1822{
e8032dde 1823 /* We don't add or remove threads during replay. */
1c63c994 1824 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1825 return;
1826
1827 /* Forward the request. */
e75fdfca 1828 ops = ops->beneath;
e8032dde 1829 ops->to_update_thread_list (ops);
e2887aa3
MM
1830}
1831
1832/* The to_thread_alive method of target record-btrace. */
1833
1834static int
1835record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1836{
1837 /* We don't add or remove threads during replay. */
1c63c994 1838 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1839 return find_thread_ptid (ptid) != NULL;
1840
1841 /* Forward the request. */
e75fdfca
TT
1842 ops = ops->beneath;
1843 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
1844}
1845
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Clears the record histories either way, since they are
   relative to the replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      /* Nothing to do if we're already at the requested position.  */
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position changed; registers must be recomputed.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1873
1874/* The to_goto_record_begin method of target record-btrace. */
1875
1876static void
08475817 1877record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1878{
1879 struct thread_info *tp;
1880 struct btrace_insn_iterator begin;
1881
1882 tp = require_btrace_thread ();
1883
1884 btrace_insn_begin (&begin, &tp->btrace);
1885 record_btrace_set_replay (tp, &begin);
1886
1887 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1888}
1889
1890/* The to_goto_record_end method of target record-btrace. */
1891
1892static void
307a1b91 1893record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1894{
1895 struct thread_info *tp;
1896
1897 tp = require_btrace_thread ();
1898
1899 record_btrace_set_replay (tp, NULL);
1900
1901 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1902}
1903
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to the recorded instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN was truncated to unsigned int above.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  /* Show where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1930
70ad5bff
MM
1931/* The to_execution_direction target method. */
1932
1933static enum exec_direction_kind
1934record_btrace_execution_direction (struct target_ops *self)
1935{
1936 return record_btrace_resume_exec_dir;
1937}
1938
aef92902
MM
1939/* The to_prepare_to_generate_core target method. */
1940
1941static void
1942record_btrace_prepare_to_generate_core (struct target_ops *self)
1943{
1944 record_btrace_generating_corefile = 1;
1945}
1946
1947/* The to_done_generating_core target method. */
1948
1949static void
1950record_btrace_done_generating_core (struct target_ops *self)
1951{
1952 record_btrace_generating_corefile = 0;
1953}
1954
afedecd3
MM
/* Initialize the record-btrace target ops.  Methods not set here fall
   back to the target beneath via the usual target delegation.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  /* Generic record target methods.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* History browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Replay-aware memory, breakpoint and register access.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2005
f4abbc16
MM
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  volatile struct gdb_exception exception;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY_CATCH (exception, RETURN_MASK_ALL)
    execute_command ("target record-btrace", from_tty);

  /* Reset the configured format if the target could not be pushed.  */
  if (exception.error != 0)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
}
2027
afedecd3
MM
/* Alias for "target record".  Defaults to the BTS recording format.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  volatile struct gdb_exception exception;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY_CATCH (exception, RETURN_MASK_ALL)
    execute_command ("target record-btrace", from_tty);

  if (exception.error == 0)
    return;

  /* Reset the configured format if the target could not be pushed.  */
  record_btrace_conf.format = BTRACE_FORMAT_NONE;
  throw_exception (exception);
}
2049
67b5c0c1
MM
2050/* The "set record btrace" command. */
2051
2052static void
2053cmd_set_record_btrace (char *args, int from_tty)
2054{
2055 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2056}
2057
2058/* The "show record btrace" command. */
2059
2060static void
2061cmd_show_record_btrace (char *args, int from_tty)
2062{
2063 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2064}
2065
2066/* The "show record btrace replay-memory-access" command. */
2067
2068static void
2069cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2070 struct cmd_list_element *c, const char *value)
2071{
2072 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2073 replay_memory_access);
2074}
2075
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands, the record-btrace target, and the frame
   cache hash table.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording (BTS format by default).  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" selects the BTS format explicitly.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.326834 seconds and 4 git commands to generate.