/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
afedecd3
MM
74/* Print a record-btrace debug message. Use do ... while (0) to avoid
75 ambiguities when used in if statements. */
76
77#define DEBUG(msg, args...) \
78 do \
79 { \
80 if (record_debug != 0) \
81 fprintf_unfiltered (gdb_stdlog, \
82 "[record-btrace] " msg "\n", ##args); \
83 } \
84 while (0)
85
86
87/* Update the branch trace for the current thread and return a pointer to its
066ce621 88 thread_info.
afedecd3
MM
89
90 Throws an error if there is no thread or no trace. This function never
91 returns NULL. */
92
066ce621
MM
93static struct thread_info *
94require_btrace_thread (void)
afedecd3
MM
95{
96 struct thread_info *tp;
afedecd3
MM
97
98 DEBUG ("require");
99
100 tp = find_thread_ptid (inferior_ptid);
101 if (tp == NULL)
102 error (_("No thread."));
103
104 btrace_fetch (tp);
105
6e07b1d2 106 if (btrace_is_empty (tp))
afedecd3
MM
107 error (_("No trace."));
108
066ce621
MM
109 return tp;
110}
111
112/* Update the branch trace for the current thread and return a pointer to its
113 branch trace information struct.
114
115 Throws an error if there is no thread or no trace. This function never
116 returns NULL. */
117
118static struct btrace_thread_info *
119require_btrace (void)
120{
121 struct thread_info *tp;
122
123 tp = require_btrace_thread ();
124
125 return &tp->btrace;
afedecd3
MM
126}
127
128/* Enable branch tracing for one thread. Warn on errors. */
129
130static void
131record_btrace_enable_warn (struct thread_info *tp)
132{
133 volatile struct gdb_exception error;
134
135 TRY_CATCH (error, RETURN_MASK_ERROR)
136 btrace_enable (tp);
137
138 if (error.message != NULL)
139 warning ("%s", error.message);
140}
/* Callback function to disable branch tracing for one thread.
   ARG is the struct thread_info * to operate on (cleanup signature).  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
153
154/* Enable automatic tracing of new threads. */
155
156static void
157record_btrace_auto_enable (void)
158{
159 DEBUG ("attach thread observer");
160
161 record_btrace_thread_observer
162 = observer_attach_new_thread (record_btrace_enable_warn);
163}
164
165/* Disable automatic tracing of new threads. */
166
167static void
168record_btrace_auto_disable (void)
169{
170 /* The observer may have been detached, already. */
171 if (record_btrace_thread_observer == NULL)
172 return;
173
174 DEBUG ("detach thread observer");
175
176 observer_detach_new_thread (record_btrace_thread_observer);
177 record_btrace_thread_observer = NULL;
178}
179
70ad5bff
MM
180/* The record-btrace async event handler function. */
181
182static void
183record_btrace_handle_async_inferior_event (gdb_client_data data)
184{
185 inferior_event_handler (INF_REG_EVENT, NULL);
186}
187
afedecd3
MM
188/* The to_open method of target record-btrace. */
189
190static void
191record_btrace_open (char *args, int from_tty)
192{
193 struct cleanup *disable_chain;
194 struct thread_info *tp;
195
196 DEBUG ("open");
197
8213266a 198 record_preopen ();
afedecd3
MM
199
200 if (!target_has_execution)
201 error (_("The program is not being run."));
202
203 if (!target_supports_btrace ())
204 error (_("Target does not support branch tracing."));
205
52834460
MM
206 if (non_stop)
207 error (_("Record btrace can't debug inferior in non-stop mode."));
208
afedecd3
MM
209 gdb_assert (record_btrace_thread_observer == NULL);
210
211 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 212 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
213 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
214 {
215 btrace_enable (tp);
216
217 make_cleanup (record_btrace_disable_callback, tp);
218 }
219
220 record_btrace_auto_enable ();
221
222 push_target (&record_btrace_ops);
223
70ad5bff
MM
224 record_btrace_async_inferior_event_handler
225 = create_async_event_handler (record_btrace_handle_async_inferior_event,
226 NULL);
aef92902 227 record_btrace_generating_corefile = 0;
70ad5bff 228
afedecd3
MM
229 observer_notify_record_changed (current_inferior (), 1);
230
231 discard_cleanups (disable_chain);
232}
233
234/* The to_stop_recording method of target record-btrace. */
235
236static void
c6cd7c02 237record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
238{
239 struct thread_info *tp;
240
241 DEBUG ("stop recording");
242
243 record_btrace_auto_disable ();
244
034f788c 245 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
246 if (tp->btrace.target != NULL)
247 btrace_disable (tp);
248}
249
250/* The to_close method of target record-btrace. */
251
252static void
de90e03d 253record_btrace_close (struct target_ops *self)
afedecd3 254{
568e808b
MM
255 struct thread_info *tp;
256
70ad5bff
MM
257 if (record_btrace_async_inferior_event_handler != NULL)
258 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
259
99c819ee
MM
260 /* Make sure automatic recording gets disabled even if we did not stop
261 recording before closing the record-btrace target. */
262 record_btrace_auto_disable ();
263
568e808b
MM
264 /* We should have already stopped recording.
265 Tear down btrace in case we have not. */
034f788c 266 ALL_NON_EXITED_THREADS (tp)
568e808b 267 btrace_teardown (tp);
afedecd3
MM
268}
269
270/* The to_info_record method of target record-btrace. */
271
272static void
630d6a4a 273record_btrace_info (struct target_ops *self)
afedecd3
MM
274{
275 struct btrace_thread_info *btinfo;
276 struct thread_info *tp;
23a7fe75 277 unsigned int insns, calls;
afedecd3
MM
278
279 DEBUG ("info");
280
281 tp = find_thread_ptid (inferior_ptid);
282 if (tp == NULL)
283 error (_("No thread."));
284
285 btrace_fetch (tp);
286
23a7fe75
MM
287 insns = 0;
288 calls = 0;
289
afedecd3 290 btinfo = &tp->btrace;
6e07b1d2
MM
291
292 if (!btrace_is_empty (tp))
23a7fe75
MM
293 {
294 struct btrace_call_iterator call;
295 struct btrace_insn_iterator insn;
296
297 btrace_call_end (&call, btinfo);
298 btrace_call_prev (&call, 1);
5de9129b 299 calls = btrace_call_number (&call);
23a7fe75
MM
300
301 btrace_insn_end (&insn, btinfo);
302 btrace_insn_prev (&insn, 1);
5de9129b 303 insns = btrace_insn_number (&insn);
23a7fe75 304 }
afedecd3
MM
305
306 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
23a7fe75 307 "%d (%s).\n"), insns, calls, tp->num,
afedecd3 308 target_pid_to_str (tp->ptid));
07bbe694
MM
309
310 if (btrace_is_replaying (tp))
311 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
312 btrace_insn_number (btinfo->replay));
afedecd3
MM
313}
/* Print an unsigned int in field FLD of UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
322
323/* Disassemble a section of the recorded instruction trace. */
324
325static void
23a7fe75
MM
326btrace_insn_history (struct ui_out *uiout,
327 const struct btrace_insn_iterator *begin,
328 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
329{
330 struct gdbarch *gdbarch;
23a7fe75 331 struct btrace_insn_iterator it;
afedecd3 332
23a7fe75
MM
333 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
334 btrace_insn_number (end));
afedecd3
MM
335
336 gdbarch = target_gdbarch ();
337
23a7fe75 338 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 339 {
23a7fe75
MM
340 const struct btrace_insn *insn;
341
342 insn = btrace_insn_get (&it);
343
afedecd3 344 /* Print the instruction index. */
23a7fe75 345 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
afedecd3
MM
346 ui_out_text (uiout, "\t");
347
348 /* Disassembly with '/m' flag may not produce the expected result.
349 See PR gdb/11833. */
23a7fe75 350 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
afedecd3
MM
351 }
352}
353
354/* The to_insn_history method of target record-btrace. */
355
356static void
7a6c5609 357record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
358{
359 struct btrace_thread_info *btinfo;
23a7fe75
MM
360 struct btrace_insn_history *history;
361 struct btrace_insn_iterator begin, end;
afedecd3
MM
362 struct cleanup *uiout_cleanup;
363 struct ui_out *uiout;
23a7fe75 364 unsigned int context, covered;
afedecd3
MM
365
366 uiout = current_uiout;
367 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
368 "insn history");
afedecd3 369 context = abs (size);
afedecd3
MM
370 if (context == 0)
371 error (_("Bad record instruction-history-size."));
372
23a7fe75
MM
373 btinfo = require_btrace ();
374 history = btinfo->insn_history;
375 if (history == NULL)
afedecd3 376 {
07bbe694 377 struct btrace_insn_iterator *replay;
afedecd3 378
23a7fe75 379 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 380
07bbe694
MM
381 /* If we're replaying, we start at the replay position. Otherwise, we
382 start at the tail of the trace. */
383 replay = btinfo->replay;
384 if (replay != NULL)
385 begin = *replay;
386 else
387 btrace_insn_end (&begin, btinfo);
388
389 /* We start from here and expand in the requested direction. Then we
390 expand in the other direction, as well, to fill up any remaining
391 context. */
392 end = begin;
393 if (size < 0)
394 {
395 /* We want the current position covered, as well. */
396 covered = btrace_insn_next (&end, 1);
397 covered += btrace_insn_prev (&begin, context - covered);
398 covered += btrace_insn_next (&end, context - covered);
399 }
400 else
401 {
402 covered = btrace_insn_next (&end, context);
403 covered += btrace_insn_prev (&begin, context - covered);
404 }
afedecd3
MM
405 }
406 else
407 {
23a7fe75
MM
408 begin = history->begin;
409 end = history->end;
afedecd3 410
23a7fe75
MM
411 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
412 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 413
23a7fe75
MM
414 if (size < 0)
415 {
416 end = begin;
417 covered = btrace_insn_prev (&begin, context);
418 }
419 else
420 {
421 begin = end;
422 covered = btrace_insn_next (&end, context);
423 }
afedecd3
MM
424 }
425
23a7fe75
MM
426 if (covered > 0)
427 btrace_insn_history (uiout, &begin, &end, flags);
428 else
429 {
430 if (size < 0)
431 printf_unfiltered (_("At the start of the branch trace record.\n"));
432 else
433 printf_unfiltered (_("At the end of the branch trace record.\n"));
434 }
afedecd3 435
23a7fe75 436 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
437 do_cleanups (uiout_cleanup);
438}
439
440/* The to_insn_history_range method of target record-btrace. */
441
442static void
4e99c6b7
TT
443record_btrace_insn_history_range (struct target_ops *self,
444 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
445{
446 struct btrace_thread_info *btinfo;
23a7fe75
MM
447 struct btrace_insn_history *history;
448 struct btrace_insn_iterator begin, end;
afedecd3
MM
449 struct cleanup *uiout_cleanup;
450 struct ui_out *uiout;
23a7fe75
MM
451 unsigned int low, high;
452 int found;
afedecd3
MM
453
454 uiout = current_uiout;
455 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
456 "insn history");
23a7fe75
MM
457 low = from;
458 high = to;
afedecd3 459
23a7fe75 460 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
461
462 /* Check for wrap-arounds. */
23a7fe75 463 if (low != from || high != to)
afedecd3
MM
464 error (_("Bad range."));
465
0688d04e 466 if (high < low)
afedecd3
MM
467 error (_("Bad range."));
468
23a7fe75 469 btinfo = require_btrace ();
afedecd3 470
23a7fe75
MM
471 found = btrace_find_insn_by_number (&begin, btinfo, low);
472 if (found == 0)
473 error (_("Range out of bounds."));
afedecd3 474
23a7fe75
MM
475 found = btrace_find_insn_by_number (&end, btinfo, high);
476 if (found == 0)
0688d04e
MM
477 {
478 /* Silently truncate the range. */
479 btrace_insn_end (&end, btinfo);
480 }
481 else
482 {
483 /* We want both begin and end to be inclusive. */
484 btrace_insn_next (&end, 1);
485 }
afedecd3 486
23a7fe75
MM
487 btrace_insn_history (uiout, &begin, &end, flags);
488 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
489
490 do_cleanups (uiout_cleanup);
491}
492
493/* The to_insn_history_from method of target record-btrace. */
494
495static void
9abc3ff3
TT
496record_btrace_insn_history_from (struct target_ops *self,
497 ULONGEST from, int size, int flags)
afedecd3
MM
498{
499 ULONGEST begin, end, context;
500
501 context = abs (size);
0688d04e
MM
502 if (context == 0)
503 error (_("Bad record instruction-history-size."));
afedecd3
MM
504
505 if (size < 0)
506 {
507 end = from;
508
509 if (from < context)
510 begin = 0;
511 else
0688d04e 512 begin = from - context + 1;
afedecd3
MM
513 }
514 else
515 {
516 begin = from;
0688d04e 517 end = from + context - 1;
afedecd3
MM
518
519 /* Check for wrap-around. */
520 if (end < begin)
521 end = ULONGEST_MAX;
522 }
523
4e99c6b7 524 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
525}
526
527/* Print the instruction number range for a function call history line. */
528
529static void
23a7fe75
MM
530btrace_call_history_insn_range (struct ui_out *uiout,
531 const struct btrace_function *bfun)
afedecd3 532{
7acbe133
MM
533 unsigned int begin, end, size;
534
535 size = VEC_length (btrace_insn_s, bfun->insn);
536 gdb_assert (size > 0);
afedecd3 537
23a7fe75 538 begin = bfun->insn_offset;
7acbe133 539 end = begin + size - 1;
afedecd3 540
23a7fe75 541 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 542 ui_out_text (uiout, ",");
23a7fe75 543 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
544}
545
546/* Print the source line information for a function call history line. */
547
548static void
23a7fe75
MM
549btrace_call_history_src_line (struct ui_out *uiout,
550 const struct btrace_function *bfun)
afedecd3
MM
551{
552 struct symbol *sym;
23a7fe75 553 int begin, end;
afedecd3
MM
554
555 sym = bfun->sym;
556 if (sym == NULL)
557 return;
558
559 ui_out_field_string (uiout, "file",
560 symtab_to_filename_for_display (sym->symtab));
561
23a7fe75
MM
562 begin = bfun->lbegin;
563 end = bfun->lend;
564
565 if (end < begin)
afedecd3
MM
566 return;
567
568 ui_out_text (uiout, ":");
23a7fe75 569 ui_out_field_int (uiout, "min line", begin);
afedecd3 570
23a7fe75 571 if (end == begin)
afedecd3
MM
572 return;
573
8710b709 574 ui_out_text (uiout, ",");
23a7fe75 575 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
576}
577
0b722aec
MM
578/* Get the name of a branch trace function. */
579
580static const char *
581btrace_get_bfun_name (const struct btrace_function *bfun)
582{
583 struct minimal_symbol *msym;
584 struct symbol *sym;
585
586 if (bfun == NULL)
587 return "??";
588
589 msym = bfun->msym;
590 sym = bfun->sym;
591
592 if (sym != NULL)
593 return SYMBOL_PRINT_NAME (sym);
594 else if (msym != NULL)
efd66ac6 595 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
596 else
597 return "??";
598}
599
afedecd3
MM
600/* Disassemble a section of the recorded function trace. */
601
602static void
23a7fe75 603btrace_call_history (struct ui_out *uiout,
8710b709 604 const struct btrace_thread_info *btinfo,
23a7fe75
MM
605 const struct btrace_call_iterator *begin,
606 const struct btrace_call_iterator *end,
afedecd3
MM
607 enum record_print_flag flags)
608{
23a7fe75 609 struct btrace_call_iterator it;
afedecd3 610
23a7fe75
MM
611 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
612 btrace_call_number (end));
afedecd3 613
23a7fe75 614 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 615 {
23a7fe75
MM
616 const struct btrace_function *bfun;
617 struct minimal_symbol *msym;
618 struct symbol *sym;
619
620 bfun = btrace_call_get (&it);
23a7fe75 621 sym = bfun->sym;
0b722aec 622 msym = bfun->msym;
23a7fe75 623
afedecd3 624 /* Print the function index. */
23a7fe75 625 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
626 ui_out_text (uiout, "\t");
627
8710b709
MM
628 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
629 {
630 int level = bfun->level + btinfo->level, i;
631
632 for (i = 0; i < level; ++i)
633 ui_out_text (uiout, " ");
634 }
635
636 if (sym != NULL)
637 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
638 else if (msym != NULL)
efd66ac6 639 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
640 else if (!ui_out_is_mi_like_p (uiout))
641 ui_out_field_string (uiout, "function", "??");
642
1e038f67 643 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 644 {
8710b709 645 ui_out_text (uiout, _("\tinst "));
23a7fe75 646 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
647 }
648
1e038f67 649 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 650 {
8710b709 651 ui_out_text (uiout, _("\tat "));
23a7fe75 652 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
653 }
654
afedecd3
MM
655 ui_out_text (uiout, "\n");
656 }
657}
658
659/* The to_call_history method of target record-btrace. */
660
661static void
5df2fcba 662record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
663{
664 struct btrace_thread_info *btinfo;
23a7fe75
MM
665 struct btrace_call_history *history;
666 struct btrace_call_iterator begin, end;
afedecd3
MM
667 struct cleanup *uiout_cleanup;
668 struct ui_out *uiout;
23a7fe75 669 unsigned int context, covered;
afedecd3
MM
670
671 uiout = current_uiout;
672 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
673 "insn history");
afedecd3 674 context = abs (size);
afedecd3
MM
675 if (context == 0)
676 error (_("Bad record function-call-history-size."));
677
23a7fe75
MM
678 btinfo = require_btrace ();
679 history = btinfo->call_history;
680 if (history == NULL)
afedecd3 681 {
07bbe694 682 struct btrace_insn_iterator *replay;
afedecd3 683
23a7fe75 684 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 685
07bbe694
MM
686 /* If we're replaying, we start at the replay position. Otherwise, we
687 start at the tail of the trace. */
688 replay = btinfo->replay;
689 if (replay != NULL)
690 {
691 begin.function = replay->function;
692 begin.btinfo = btinfo;
693 }
694 else
695 btrace_call_end (&begin, btinfo);
696
697 /* We start from here and expand in the requested direction. Then we
698 expand in the other direction, as well, to fill up any remaining
699 context. */
700 end = begin;
701 if (size < 0)
702 {
703 /* We want the current position covered, as well. */
704 covered = btrace_call_next (&end, 1);
705 covered += btrace_call_prev (&begin, context - covered);
706 covered += btrace_call_next (&end, context - covered);
707 }
708 else
709 {
710 covered = btrace_call_next (&end, context);
711 covered += btrace_call_prev (&begin, context- covered);
712 }
afedecd3
MM
713 }
714 else
715 {
23a7fe75
MM
716 begin = history->begin;
717 end = history->end;
afedecd3 718
23a7fe75
MM
719 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
720 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 721
23a7fe75
MM
722 if (size < 0)
723 {
724 end = begin;
725 covered = btrace_call_prev (&begin, context);
726 }
727 else
728 {
729 begin = end;
730 covered = btrace_call_next (&end, context);
731 }
afedecd3
MM
732 }
733
23a7fe75 734 if (covered > 0)
8710b709 735 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
736 else
737 {
738 if (size < 0)
739 printf_unfiltered (_("At the start of the branch trace record.\n"));
740 else
741 printf_unfiltered (_("At the end of the branch trace record.\n"));
742 }
afedecd3 743
23a7fe75 744 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
745 do_cleanups (uiout_cleanup);
746}
747
748/* The to_call_history_range method of target record-btrace. */
749
750static void
f0d960ea
TT
751record_btrace_call_history_range (struct target_ops *self,
752 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
753{
754 struct btrace_thread_info *btinfo;
23a7fe75
MM
755 struct btrace_call_history *history;
756 struct btrace_call_iterator begin, end;
afedecd3
MM
757 struct cleanup *uiout_cleanup;
758 struct ui_out *uiout;
23a7fe75
MM
759 unsigned int low, high;
760 int found;
afedecd3
MM
761
762 uiout = current_uiout;
763 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
764 "func history");
23a7fe75
MM
765 low = from;
766 high = to;
afedecd3 767
23a7fe75 768 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
769
770 /* Check for wrap-arounds. */
23a7fe75 771 if (low != from || high != to)
afedecd3
MM
772 error (_("Bad range."));
773
0688d04e 774 if (high < low)
afedecd3
MM
775 error (_("Bad range."));
776
23a7fe75 777 btinfo = require_btrace ();
afedecd3 778
23a7fe75
MM
779 found = btrace_find_call_by_number (&begin, btinfo, low);
780 if (found == 0)
781 error (_("Range out of bounds."));
afedecd3 782
23a7fe75
MM
783 found = btrace_find_call_by_number (&end, btinfo, high);
784 if (found == 0)
0688d04e
MM
785 {
786 /* Silently truncate the range. */
787 btrace_call_end (&end, btinfo);
788 }
789 else
790 {
791 /* We want both begin and end to be inclusive. */
792 btrace_call_next (&end, 1);
793 }
afedecd3 794
8710b709 795 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 796 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
797
798 do_cleanups (uiout_cleanup);
799}
800
801/* The to_call_history_from method of target record-btrace. */
802
803static void
ec0aea04
TT
804record_btrace_call_history_from (struct target_ops *self,
805 ULONGEST from, int size, int flags)
afedecd3
MM
806{
807 ULONGEST begin, end, context;
808
809 context = abs (size);
0688d04e
MM
810 if (context == 0)
811 error (_("Bad record function-call-history-size."));
afedecd3
MM
812
813 if (size < 0)
814 {
815 end = from;
816
817 if (from < context)
818 begin = 0;
819 else
0688d04e 820 begin = from - context + 1;
afedecd3
MM
821 }
822 else
823 {
824 begin = from;
0688d04e 825 end = from + context - 1;
afedecd3
MM
826
827 /* Check for wrap-around. */
828 if (end < begin)
829 end = ULONGEST_MAX;
830 }
831
f0d960ea 832 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
833}
834
07bbe694
MM
835/* The to_record_is_replaying method of target record-btrace. */
836
837static int
1c63c994 838record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
839{
840 struct thread_info *tp;
841
034f788c 842 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
843 if (btrace_is_replaying (tp))
844 return 1;
845
846 return 0;
847}
848
633785ff
MM
849/* The to_xfer_partial method of target record-btrace. */
850
9b409511 851static enum target_xfer_status
633785ff
MM
852record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
853 const char *annex, gdb_byte *readbuf,
854 const gdb_byte *writebuf, ULONGEST offset,
9b409511 855 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
856{
857 struct target_ops *t;
858
859 /* Filter out requests that don't make sense during replay. */
67b5c0c1 860 if (replay_memory_access == replay_memory_access_read_only
aef92902 861 && !record_btrace_generating_corefile
67b5c0c1 862 && record_btrace_is_replaying (ops))
633785ff
MM
863 {
864 switch (object)
865 {
866 case TARGET_OBJECT_MEMORY:
867 {
868 struct target_section *section;
869
870 /* We do not allow writing memory in general. */
871 if (writebuf != NULL)
9b409511
YQ
872 {
873 *xfered_len = len;
bc113b4e 874 return TARGET_XFER_UNAVAILABLE;
9b409511 875 }
633785ff
MM
876
877 /* We allow reading readonly memory. */
878 section = target_section_by_addr (ops, offset);
879 if (section != NULL)
880 {
881 /* Check if the section we found is readonly. */
882 if ((bfd_get_section_flags (section->the_bfd_section->owner,
883 section->the_bfd_section)
884 & SEC_READONLY) != 0)
885 {
886 /* Truncate the request to fit into this section. */
887 len = min (len, section->endaddr - offset);
888 break;
889 }
890 }
891
9b409511 892 *xfered_len = len;
bc113b4e 893 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
894 }
895 }
896 }
897
898 /* Forward the request. */
899 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
900 if (ops->to_xfer_partial != NULL)
901 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 902 offset, len, xfered_len);
633785ff 903
9b409511 904 *xfered_len = len;
bc113b4e 905 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
906}
907
908/* The to_insert_breakpoint method of target record-btrace. */
909
910static int
911record_btrace_insert_breakpoint (struct target_ops *ops,
912 struct gdbarch *gdbarch,
913 struct bp_target_info *bp_tgt)
914{
915 volatile struct gdb_exception except;
67b5c0c1
MM
916 const char *old;
917 int ret;
633785ff
MM
918
919 /* Inserting breakpoints requires accessing memory. Allow it for the
920 duration of this function. */
67b5c0c1
MM
921 old = replay_memory_access;
922 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
923
924 ret = 0;
925 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 926 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff 927
67b5c0c1 928 replay_memory_access = old;
633785ff
MM
929
930 if (except.reason < 0)
931 throw_exception (except);
932
933 return ret;
934}
935
936/* The to_remove_breakpoint method of target record-btrace. */
937
938static int
939record_btrace_remove_breakpoint (struct target_ops *ops,
940 struct gdbarch *gdbarch,
941 struct bp_target_info *bp_tgt)
942{
943 volatile struct gdb_exception except;
67b5c0c1
MM
944 const char *old;
945 int ret;
633785ff
MM
946
947 /* Removing breakpoints requires accessing memory. Allow it for the
948 duration of this function. */
67b5c0c1
MM
949 old = replay_memory_access;
950 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
951
952 ret = 0;
953 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 954 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff 955
67b5c0c1 956 replay_memory_access = old;
633785ff
MM
957
958 if (except.reason < 0)
959 throw_exception (except);
960
961 return ret;
962}
963
1f3ef581
MM
964/* The to_fetch_registers method of target record-btrace. */
965
966static void
967record_btrace_fetch_registers (struct target_ops *ops,
968 struct regcache *regcache, int regno)
969{
970 struct btrace_insn_iterator *replay;
971 struct thread_info *tp;
972
973 tp = find_thread_ptid (inferior_ptid);
974 gdb_assert (tp != NULL);
975
976 replay = tp->btrace.replay;
aef92902 977 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
978 {
979 const struct btrace_insn *insn;
980 struct gdbarch *gdbarch;
981 int pcreg;
982
983 gdbarch = get_regcache_arch (regcache);
984 pcreg = gdbarch_pc_regnum (gdbarch);
985 if (pcreg < 0)
986 return;
987
988 /* We can only provide the PC register. */
989 if (regno >= 0 && regno != pcreg)
990 return;
991
992 insn = btrace_insn_get (replay);
993 gdb_assert (insn != NULL);
994
995 regcache_raw_supply (regcache, regno, &insn->pc);
996 }
997 else
998 {
999 struct target_ops *t;
1000
1001 for (t = ops->beneath; t != NULL; t = t->beneath)
1002 if (t->to_fetch_registers != NULL)
1003 {
1004 t->to_fetch_registers (t, regcache, regno);
1005 break;
1006 }
1007 }
1008}
1009
1010/* The to_store_registers method of target record-btrace. */
1011
1012static void
1013record_btrace_store_registers (struct target_ops *ops,
1014 struct regcache *regcache, int regno)
1015{
1016 struct target_ops *t;
1017
aef92902 1018 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1019 error (_("This record target does not allow writing registers."));
1020
1021 gdb_assert (may_write_registers != 0);
1022
1023 for (t = ops->beneath; t != NULL; t = t->beneath)
1024 if (t->to_store_registers != NULL)
1025 {
1026 t->to_store_registers (t, regcache, regno);
1027 return;
1028 }
1029
1030 noprocess ();
1031}
1032
1033/* The to_prepare_to_store method of target record-btrace. */
1034
1035static void
1036record_btrace_prepare_to_store (struct target_ops *ops,
1037 struct regcache *regcache)
1038{
1039 struct target_ops *t;
1040
aef92902 1041 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1042 return;
1043
1044 for (t = ops->beneath; t != NULL; t = t->beneath)
1045 if (t->to_prepare_to_store != NULL)
1046 {
1047 t->to_prepare_to_store (t, regcache);
1048 return;
1049 }
1050}
1051
0b722aec
MM
1052/* The branch trace frame cache. */
1053
1054struct btrace_frame_cache
1055{
1056 /* The thread. */
1057 struct thread_info *tp;
1058
1059 /* The frame info. */
1060 struct frame_info *frame;
1061
1062 /* The branch trace function segment. */
1063 const struct btrace_function *bfun;
1064};
1065
1066/* A struct btrace_frame_cache hash table indexed by NEXT. */
1067
1068static htab_t bfcache;
1069
1070/* hash_f for htab_create_alloc of bfcache. */
1071
1072static hashval_t
1073bfcache_hash (const void *arg)
1074{
1075 const struct btrace_frame_cache *cache = arg;
1076
1077 return htab_hash_pointer (cache->frame);
1078}
1079
1080/* eq_f for htab_create_alloc of bfcache. */
1081
1082static int
1083bfcache_eq (const void *arg1, const void *arg2)
1084{
1085 const struct btrace_frame_cache *cache1 = arg1;
1086 const struct btrace_frame_cache *cache2 = arg2;
1087
1088 return cache1->frame == cache2->frame;
1089}
1090
/* Create a new btrace frame cache for FRAME and register it in BFCACHE.
   The cache memory is frame-obstack allocated and reclaimed with the frame;
   the hash table entry is removed in record_btrace_frame_dealloc_cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* There must not already be a cache entry for this frame.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1108
1109/* Extract the branch trace function from a branch trace frame. */
1110
1111static const struct btrace_function *
1112btrace_get_frame_function (struct frame_info *frame)
1113{
1114 const struct btrace_frame_cache *cache;
1115 const struct btrace_function *bfun;
1116 struct btrace_frame_cache pattern;
1117 void **slot;
1118
1119 pattern.frame = frame;
1120
1121 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1122 if (slot == NULL)
1123 return NULL;
1124
1125 cache = *slot;
1126 return cache->bfun;
1127}
1128
cecac1ab
MM
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Without a caller segment there is no recorded trace to unwind into.  */
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1147
/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so that all segments
     of the same function instance yield the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  /* The stack is unavailable when replaying; build the id from the
     function's start address plus the segment number instead.  */
  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1176
/* Implement prev_register method for record_btrace_frame_unwind.
   Branch trace records control flow only, so the PC is the sole register
   we can reconstruct; everything else throws NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is the return point: the caller resumes at the first
	 instruction of its segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise the caller's last traced instruction is the call itself;
	 the unwound PC is the instruction right after it.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1225
/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: claim it iff the thread is replaying, in which
	 case the current replay position supplies the function segment.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: we are the caller of the btrace callee frame NEXT,
	 unless the callee was entered via a tail call - that case is
	 handled by record_btrace_tailcall_frame_sniffer.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1275
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims the frame only when the callee frame below was entered via a
   tail call (BFUN_UP_LINKS_TO_TAILCALL).  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail-call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1313
/* The dealloc_cache method shared by both btrace frame unwinders: remove
   the frame's entry from BFCACHE.  The cache object itself is allocated
   on the frame obstack (see bfcache_new) and freed with the frame.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must exist - it was registered in bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1327
/* btrace recording does not store previous memory content, nor the contents
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1344
/* Like record_btrace_frame_unwind, but for tail-call frames; only the
   frame type and the sniffer differ.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1355
ac01945b
TT
/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1363
/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1371
52834460
MM
/* Indicate that TP should be resumed according to FLAG.  The actual move
   happens later in record_btrace_wait; here we only record the intent.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may have at most one pending move.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1392
1393/* Find the thread to resume given a PTID. */
1394
1395static struct thread_info *
1396record_btrace_find_resume_thread (ptid_t ptid)
1397{
1398 struct thread_info *tp;
1399
1400 /* When asked to resume everything, we pick the current thread. */
1401 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1402 ptid = inferior_ptid;
1403
1404 return find_thread_ptid (ptid);
1405}
1406
/* Start replaying a thread.  Returns the new replay iterator, which is also
   stored in TP's btrace info, or NULL if TP has no trace.  On error, the
   replay state is rolled back and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the replay state changes before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1493
1494/* Stop replaying a thread. */
1495
1496static void
1497record_btrace_stop_replaying (struct thread_info *tp)
1498{
1499 struct btrace_thread_info *btinfo;
1500
1501 btinfo = &tp->btrace;
1502
1503 xfree (btinfo->replay);
1504 btinfo->replay = NULL;
1505
1506 /* Make sure we're not leaving any stale registers. */
1507 registers_changed_ptid (tp->ptid);
1508}
1509
b2f4cfde
MM
/* The to_resume method of target record-btrace.  Records the resume intent
   for a single thread; the actual stepping happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1566
/* Find a thread to move: PTID's thread if it has a pending move request,
   otherwise any other resumed thread.  Returns NULL when nothing is
   waiting to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1586
/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}
1598
/* Return a target_waitstatus indicating that a step finished.  Replay steps
   always report SIGTRAP, like a real single-step would.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}
1611
1612/* Clear the record histories. */
1613
1614static void
1615record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1616{
1617 xfree (btinfo->insn_history);
1618 xfree (btinfo->call_history);
1619
1620 btinfo->insn_history = NULL;
1621 btinfo->call_history = NULL;
1622}
1623
/* Step a single thread according to its pending BTHR_* move request.
   Consumes the request and returns the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Extract and consume the pending move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* The address space is needed for breakpoint checks below.  */
      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or the begin of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1749
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* Nothing is pending; tell the caller to keep waiting.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1796
/* The to_can_execute_reverse method of target record-btrace.  The recorded
   execution history always permits reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1804
/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  /* Otherwise defer to the target beneath.  */
  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}
1818
e2887aa3
MM
1819/* The to_find_new_threads method of target record-btrace. */
1820
1821static void
1822record_btrace_find_new_threads (struct target_ops *ops)
1823{
1824 /* Don't expect new threads if we're replaying. */
1c63c994 1825 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1826 return;
1827
1828 /* Forward the request. */
1829 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1830 if (ops->to_find_new_threads != NULL)
1831 {
1832 ops->to_find_new_threads (ops);
1833 break;
1834 }
1835}
1836
1837/* The to_thread_alive method of target record-btrace. */
1838
1839static int
1840record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1841{
1842 /* We don't add or remove threads during replay. */
1c63c994 1843 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1844 return find_thread_ptid (ptid) != NULL;
1845
1846 /* Forward the request. */
1847 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1848 if (ops->to_thread_alive != NULL)
1849 return ops->to_thread_alive (ops, ptid);
1850
1851 return 0;
1852}
1853
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  In all cases the cached record histories are invalidated so
   they restart from the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; keep the histories.  */
	return;

      *btinfo->replay = *it;
      /* The replay position changed; cached registers are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1881
/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  /* Replay from the first recorded instruction.  */
  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1897
/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator stops replaying, i.e. returns to the live position.  */
  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1911
/* The to_goto_record method of target record-btrace.  Move the replay
   position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN must fit into the narrower NUMBER.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1938
70ad5bff
MM
/* The to_execution_direction target method.  Reports the direction stored
   by the last record_btrace_resume call.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
1946
aef92902
MM
/* The to_prepare_to_generate_core target method.  While the flag is set,
   register/memory access restrictions for replay are lifted (see
   record_btrace_store_registers and friends).  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
1954
/* The to_done_generating_core target method.  Re-enable the replay
   access restrictions.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
1962
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  /* Generic record methods shared with the full record target.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory, breakpoints, registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  /* Unwinding during replay.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2012
2013/* Alias for "target record". */
2014
2015static void
2016cmd_record_btrace_start (char *args, int from_tty)
2017{
2018 if (args != NULL && *args != 0)
2019 error (_("Invalid argument."));
2020
2021 execute_command ("target record-btrace", from_tty);
2022}
2023
67b5c0c1
MM
/* The "set record btrace" command.  With no sub-command, list the
   available settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2031
/* The "show record btrace" command.  Show all record btrace settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2039
2040/* The "show record btrace replay-memory-access" command. */
2041
2042static void
2043cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2044 struct cmd_list_element *c, const char *value)
2045{
2046 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2047 replay_memory_access);
2048}
2049
afedecd3
MM
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") starts branch tracing.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set record btrace replay-memory-access" chooses whether memory may be
     written while replaying.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Frame cache hash table, keyed by frame pointer.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.328204 seconds and 4 git commands to generate.