/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
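
/* Note that record_btrace_insert_breakpoint and record_btrace_remove_breakpoint
   below temporarily switch this to "read-write" so that breakpoints can be
   written and removed even while replaying.  */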

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

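/* For example, if the macro body above were wrapped in plain braces instead
   of do ... while (0), a hypothetical caller like

     if (some_condition)
       DEBUG ("msg");
     else
       do_something_else ();

   would not compile: the semicolon after the closing brace would leave the
   "else" without a matching "if".  The do ... while (0) form consumes that
   semicolon so the macro behaves like a single statement.  */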

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

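/* Typical use, as in record_btrace_open below: declare an instance, call
   add_thread for each thread as soon as btrace has been enabled for it, and
   call discard once every thread has been handled successfully.  If an error
   is thrown in between, the destructor disables btrace again for the threads
   that were already enabled.  */
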
/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
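
/* For example, a buffer size of 64 * 1024 yields *SIZE == 64 with suffix
   "kB", while 2 * 1024 * 1024 yields *SIZE == 2 with suffix "MB".  */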

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
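
/* For example, adding line 7 to an empty range yields [7, 8); adding line 4
   to that range then extends it to [4, 8).  */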

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the output emitters for the last
   printed source line and the instructions corresponding to that source
   line.  When printing a new source line, the previous emitters are closed
   and new ones are opened for the new source line.  If the source line range
   in LINES is not empty, this function will leave the emitters for the last
   printed source line open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
			    gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

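  /* For example, FROM == 100 with SIZE == 20 yields the inclusive range
     [100; 119], while FROM == 100 with SIZE == -20 yields [81; 100].  */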
  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size,
			    record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

987e68b1
MM
1817/* Return a human-readable string for FLAG. */
1818
1819static const char *
1820btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1821{
1822 switch (flag)
1823 {
1824 case BTHR_STEP:
1825 return "step";
1826
1827 case BTHR_RSTEP:
1828 return "reverse-step";
1829
1830 case BTHR_CONT:
1831 return "cont";
1832
1833 case BTHR_RCONT:
1834 return "reverse-cont";
1835
1836 case BTHR_STOP:
1837 return "stop";
1838 }
1839
1840 return "<invalid>";
1841}
1842
52834460
MM
1843/* Indicate that TP should be resumed according to FLAG. */
1844
1845static void
1846record_btrace_resume_thread (struct thread_info *tp,
1847 enum btrace_thread_flag flag)
1848{
1849 struct btrace_thread_info *btinfo;
1850
43792cf0 1851 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1852 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1853
1854 btinfo = &tp->btrace;
1855
52834460
MM
1856 /* Fetch the latest branch trace. */
1857 btrace_fetch (tp);
1858
0ca912df
MM
1859 /* A resume request overwrites a preceding resume or stop request. */
1860 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1861 btinfo->flags |= flag;
1862}
1863
ec71cc2f
MM
1864/* Get the current frame for TP. */
1865
1866static struct frame_info *
1867get_thread_current_frame (struct thread_info *tp)
1868{
1869 struct frame_info *frame;
1870 ptid_t old_inferior_ptid;
1871 int executing;
1872
1873 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1874 old_inferior_ptid = inferior_ptid;
1875 inferior_ptid = tp->ptid;
1876
1877 /* Clear the executing flag to allow changes to the current frame.
1878 We are not actually running, yet. We just started a reverse execution
1879 command or a record goto command.
1880 For the latter, EXECUTING is false and this has no effect.
1881 For the former, EXECUTING is true and we're in to_wait, about to
1882 move the thread. Since we need to recompute the stack, we temporarily
1883 set EXECUTING to flase. */
1884 executing = is_executing (inferior_ptid);
1885 set_executing (inferior_ptid, 0);
1886
1887 frame = NULL;
1888 TRY
1889 {
1890 frame = get_current_frame ();
1891 }
1892 CATCH (except, RETURN_MASK_ALL)
1893 {
1894 /* Restore the previous execution state. */
1895 set_executing (inferior_ptid, executing);
1896
1897 /* Restore the previous inferior_ptid. */
1898 inferior_ptid = old_inferior_ptid;
1899
1900 throw_exception (except);
1901 }
1902 END_CATCH
1903
1904 /* Restore the previous execution state. */
1905 set_executing (inferior_ptid, executing);
1906
1907 /* Restore the previous inferior_ptid. */
1908 inferior_ptid = old_inferior_ptid;
1909
1910 return frame;
1911}
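/* get_thread_current_frame is called twice from record_btrace_start_replaying
   below, once before and once after the replay iterator is installed, so that
   stepping-related frame ids can be recomputed for the replay unwinder.  */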
1912
52834460
MM
1913/* Start replaying a thread. */
1914
1915static struct btrace_insn_iterator *
1916record_btrace_start_replaying (struct thread_info *tp)
1917{
52834460
MM
1918 struct btrace_insn_iterator *replay;
1919 struct btrace_thread_info *btinfo;
52834460
MM
1920
1921 btinfo = &tp->btrace;
1922 replay = NULL;
1923
1924 /* We can't start replaying without trace. */
b54b03bd 1925 if (btinfo->functions.empty ())
52834460
MM
1926 return NULL;
1927
52834460
MM
1928 /* GDB stores the current frame_id when stepping in order to detect steps
1929 into subroutines.
1930 Since frames are computed differently when we're replaying, we need to
1931 recompute those stored frames and fix them up so we can still detect
1932 subroutines after we started replaying. */
492d29ea 1933 TRY
52834460
MM
1934 {
1935 struct frame_info *frame;
1936 struct frame_id frame_id;
1937 int upd_step_frame_id, upd_step_stack_frame_id;
1938
1939 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1940 frame = get_thread_current_frame (tp);
52834460
MM
1941 frame_id = get_frame_id (frame);
1942
1943 /* Check if we need to update any stepping-related frame id's. */
1944 upd_step_frame_id = frame_id_eq (frame_id,
1945 tp->control.step_frame_id);
1946 upd_step_stack_frame_id = frame_id_eq (frame_id,
1947 tp->control.step_stack_frame_id);
1948
1949 /* We start replaying at the end of the branch trace. This corresponds
1950 to the current instruction. */
8d749320 1951 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1952 btrace_insn_end (replay, btinfo);
1953
31fd9caa
MM
1954 /* Skip gaps at the end of the trace. */
1955 while (btrace_insn_get (replay) == NULL)
1956 {
1957 unsigned int steps;
1958
1959 steps = btrace_insn_prev (replay, 1);
1960 if (steps == 0)
1961 error (_("No trace."));
1962 }
1963
52834460
MM
1964 /* We're not replaying, yet. */
1965 gdb_assert (btinfo->replay == NULL);
1966 btinfo->replay = replay;
1967
1968 /* Make sure we're not using any stale registers. */
1969 registers_changed_ptid (tp->ptid);
1970
1971 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1972 frame = get_thread_current_frame (tp);
52834460
MM
1973 frame_id = get_frame_id (frame);
1974
1975 /* Replace stepping related frames where necessary. */
1976 if (upd_step_frame_id)
1977 tp->control.step_frame_id = frame_id;
1978 if (upd_step_stack_frame_id)
1979 tp->control.step_stack_frame_id = frame_id;
1980 }
492d29ea 1981 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1982 {
1983 xfree (btinfo->replay);
1984 btinfo->replay = NULL;
1985
1986 registers_changed_ptid (tp->ptid);
1987
1988 throw_exception (except);
1989 }
492d29ea 1990 END_CATCH
52834460
MM
1991
1992 return replay;
1993}
1994
1995/* Stop replaying a thread. */
1996
1997static void
1998record_btrace_stop_replaying (struct thread_info *tp)
1999{
2000 struct btrace_thread_info *btinfo;
2001
2002 btinfo = &tp->btrace;
2003
2004 xfree (btinfo->replay);
2005 btinfo->replay = NULL;
2006
2007 /* Make sure we're not leaving any stale registers. */
2008 registers_changed_ptid (tp->ptid);
2009}
2010
e3cfc1c7
MM
2011/* Stop replaying TP if it is at the end of its execution history. */
2012
2013static void
2014record_btrace_stop_replaying_at_end (struct thread_info *tp)
2015{
2016 struct btrace_insn_iterator *replay, end;
2017 struct btrace_thread_info *btinfo;
2018
2019 btinfo = &tp->btrace;
2020 replay = btinfo->replay;
2021
2022 if (replay == NULL)
2023 return;
2024
2025 btrace_insn_end (&end, btinfo);
2026
2027 if (btrace_insn_cmp (replay, &end) == 0)
2028 record_btrace_stop_replaying (tp);
2029}
2030
b2f4cfde
MM
2031/* The to_resume method of target record-btrace. */
2032
2033static void
2034record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2035 enum gdb_signal signal)
2036{
0ca912df 2037 struct thread_info *tp;
d2939ba2 2038 enum btrace_thread_flag flag, cflag;
52834460 2039
987e68b1
MM
2040 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2041 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2042 step ? "step" : "cont");
52834460 2043
0ca912df
MM
2044 /* Store the execution direction of the last resume.
2045
2046 If there is more than one to_resume call, we have to rely on infrun
2047 to not change the execution direction in-between. */
70ad5bff
MM
2048 record_btrace_resume_exec_dir = execution_direction;
2049
0ca912df 2050 /* As long as we're not replaying, just forward the request.
52834460 2051
0ca912df
MM
2052 For non-stop targets this means that no thread is replaying. In order to
2053 make progress, we may need to explicitly move replaying threads to the end
2054 of their execution history. */
a52eab48
MM
2055 if ((execution_direction != EXEC_REVERSE)
2056 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2057 {
e75fdfca 2058 ops = ops->beneath;
04c4fe8c
MM
2059 ops->to_resume (ops, ptid, step, signal);
2060 return;
b2f4cfde
MM
2061 }
2062
52834460 2063 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2064 if (execution_direction == EXEC_REVERSE)
2065 {
2066 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2067 cflag = BTHR_RCONT;
2068 }
52834460 2069 else
d2939ba2
MM
2070 {
2071 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2072 cflag = BTHR_CONT;
2073 }
52834460 2074
52834460 2075 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2076 record_btrace_wait below.
2077
2078 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2079 if (!target_is_non_stop_p ())
2080 {
2081 gdb_assert (ptid_match (inferior_ptid, ptid));
2082
2083 ALL_NON_EXITED_THREADS (tp)
2084 if (ptid_match (tp->ptid, ptid))
2085 {
2086 if (ptid_match (tp->ptid, inferior_ptid))
2087 record_btrace_resume_thread (tp, flag);
2088 else
2089 record_btrace_resume_thread (tp, cflag);
2090 }
2091 }
2092 else
2093 {
2094 ALL_NON_EXITED_THREADS (tp)
2095 if (ptid_match (tp->ptid, ptid))
2096 record_btrace_resume_thread (tp, flag);
2097 }
70ad5bff
MM
2098
2099 /* Async support. */
2100 if (target_can_async_p ())
2101 {
6a3753b3 2102 target_async (1);
70ad5bff
MM
2103 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2104 }
52834460
MM
2105}
2106
85ad3aaf
PA
2107/* The to_commit_resume method of target record-btrace. */
2108
2109static void
2110record_btrace_commit_resume (struct target_ops *ops)
2111{
2112 if ((execution_direction != EXEC_REVERSE)
2113 && !record_btrace_is_replaying (ops, minus_one_ptid))
2114 ops->beneath->to_commit_resume (ops->beneath);
2115}
2116
987e68b1
MM
2117/* Cancel resuming TP. */
2118
2119static void
2120record_btrace_cancel_resume (struct thread_info *tp)
2121{
2122 enum btrace_thread_flag flags;
2123
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2125 if (flags == 0)
2126 return;
2127
43792cf0
PA
2128 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2129 print_thread_id (tp),
987e68b1
MM
2130 target_pid_to_str (tp->ptid), flags,
2131 btrace_thread_flag_to_str (flags));
2132
2133 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2134 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2135}
2136
2137/* Return a target_waitstatus indicating that we ran out of history. */
2138
2139static struct target_waitstatus
2140btrace_step_no_history (void)
2141{
2142 struct target_waitstatus status;
2143
2144 status.kind = TARGET_WAITKIND_NO_HISTORY;
2145
2146 return status;
2147}
2148
2149/* Return a target_waitstatus indicating that a step finished. */
2150
2151static struct target_waitstatus
2152btrace_step_stopped (void)
2153{
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_TRAP;
2158
2159 return status;
2160}
2161
6e4879f0
MM
2162/* Return a target_waitstatus indicating that a thread was stopped as
2163 requested. */
2164
2165static struct target_waitstatus
2166btrace_step_stopped_on_request (void)
2167{
2168 struct target_waitstatus status;
2169
2170 status.kind = TARGET_WAITKIND_STOPPED;
2171 status.value.sig = GDB_SIGNAL_0;
2172
2173 return status;
2174}
2175
d825d248
MM
2176/* Return a target_waitstatus indicating a spurious stop. */
2177
2178static struct target_waitstatus
2179btrace_step_spurious (void)
2180{
2181 struct target_waitstatus status;
2182
2183 status.kind = TARGET_WAITKIND_SPURIOUS;
2184
2185 return status;
2186}
2187
e3cfc1c7
MM
2188/* Return a target_waitstatus indicating that the thread was not resumed. */
2189
2190static struct target_waitstatus
2191btrace_step_no_resumed (void)
2192{
2193 struct target_waitstatus status;
2194
2195 status.kind = TARGET_WAITKIND_NO_RESUMED;
2196
2197 return status;
2198}
2199
2200/* Return a target_waitstatus indicating that we should wait again. */
2201
2202static struct target_waitstatus
2203btrace_step_again (void)
2204{
2205 struct target_waitstatus status;
2206
2207 status.kind = TARGET_WAITKIND_IGNORE;
2208
2209 return status;
2210}
2211
52834460
MM
2212/* Clear the record histories. */
2213
2214static void
2215record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2216{
2217 xfree (btinfo->insn_history);
2218 xfree (btinfo->call_history);
2219
2220 btinfo->insn_history = NULL;
2221 btinfo->call_history = NULL;
2222}
2223
3c615f99
MM
2224/* Check whether TP's current replay position is at a breakpoint. */
2225
2226static int
2227record_btrace_replay_at_breakpoint (struct thread_info *tp)
2228{
2229 struct btrace_insn_iterator *replay;
2230 struct btrace_thread_info *btinfo;
2231 const struct btrace_insn *insn;
2232 struct inferior *inf;
2233
2234 btinfo = &tp->btrace;
2235 replay = btinfo->replay;
2236
2237 if (replay == NULL)
2238 return 0;
2239
2240 insn = btrace_insn_get (replay);
2241 if (insn == NULL)
2242 return 0;
2243
2244 inf = find_inferior_ptid (tp->ptid);
2245 if (inf == NULL)
2246 return 0;
2247
2248 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2249 &btinfo->stop_reason);
2250}
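/* The stop reason stored in BTINFO->stop_reason above is what
   record_btrace_stopped_by_sw_breakpoint and
   record_btrace_stopped_by_hw_breakpoint below report while replaying.  */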
2251
d825d248 2252/* Step one instruction in forward direction. */
52834460
MM
2253
2254static struct target_waitstatus
d825d248 2255record_btrace_single_step_forward (struct thread_info *tp)
52834460 2256{
b61ce85c 2257 struct btrace_insn_iterator *replay, end, start;
52834460 2258 struct btrace_thread_info *btinfo;
52834460 2259
d825d248
MM
2260 btinfo = &tp->btrace;
2261 replay = btinfo->replay;
2262
2263 /* We're done if we're not replaying. */
2264 if (replay == NULL)
2265 return btrace_step_no_history ();
2266
011c71b6
MM
2267 /* Check if we're stepping a breakpoint. */
2268 if (record_btrace_replay_at_breakpoint (tp))
2269 return btrace_step_stopped ();
2270
b61ce85c
MM
2271 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2272 jump back to the instruction at which we started. */
2273 start = *replay;
d825d248
MM
2274 do
2275 {
2276 unsigned int steps;
2277
e3cfc1c7
MM
2278 /* We will bail out here if we continue stepping after reaching the end
2279 of the execution history. */
d825d248
MM
2280 steps = btrace_insn_next (replay, 1);
2281 if (steps == 0)
b61ce85c
MM
2282 {
2283 *replay = start;
2284 return btrace_step_no_history ();
2285 }
d825d248
MM
2286 }
2287 while (btrace_insn_get (replay) == NULL);
2288
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2291
e3cfc1c7
MM
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed, yet, so the trace really ends
2294 one instruction earlier. */
d825d248 2295 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2296 return btrace_step_no_history ();
d825d248
MM
2297
2298 return btrace_step_spurious ();
2299}
2300
2301/* Step one instruction in backward direction. */
2302
2303static struct target_waitstatus
2304record_btrace_single_step_backward (struct thread_info *tp)
2305{
b61ce85c 2306 struct btrace_insn_iterator *replay, start;
d825d248 2307 struct btrace_thread_info *btinfo;
e59fa00f 2308
52834460
MM
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
d825d248
MM
2312 /* Start replaying if we're not already doing so. */
2313 if (replay == NULL)
2314 replay = record_btrace_start_replaying (tp);
2315
2316 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2317 Skip gaps during replay. If we end up at a gap (at the beginning of
2318 the trace), jump back to the instruction at which we started. */
2319 start = *replay;
d825d248
MM
2320 do
2321 {
2322 unsigned int steps;
2323
2324 steps = btrace_insn_prev (replay, 1);
2325 if (steps == 0)
b61ce85c
MM
2326 {
2327 *replay = start;
2328 return btrace_step_no_history ();
2329 }
d825d248
MM
2330 }
2331 while (btrace_insn_get (replay) == NULL);
2332
011c71b6
MM
2333 /* Check if we're stepping a breakpoint.
2334
2335 For reverse-stepping, this check is after the step. There is logic in
2336 infrun.c that handles reverse-stepping separately. See, for example,
2337 proceed and adjust_pc_after_break.
2338
2339 This code assumes that for reverse-stepping, PC points to the last
2340 de-executed instruction, whereas for forward-stepping PC points to the
2341 next to-be-executed instruction. */
2342 if (record_btrace_replay_at_breakpoint (tp))
2343 return btrace_step_stopped ();
2344
d825d248
MM
2345 return btrace_step_spurious ();
2346}
2347
2348/* Step a single thread. */
2349
2350static struct target_waitstatus
2351record_btrace_step_thread (struct thread_info *tp)
2352{
2353 struct btrace_thread_info *btinfo;
2354 struct target_waitstatus status;
2355 enum btrace_thread_flag flags;
2356
2357 btinfo = &tp->btrace;
2358
6e4879f0
MM
2359 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2360 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2361
43792cf0 2362 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2363 target_pid_to_str (tp->ptid), flags,
2364 btrace_thread_flag_to_str (flags));
52834460 2365
6e4879f0
MM
2366 /* We can't step without an execution history. */
2367 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2368 return btrace_step_no_history ();
2369
52834460
MM
2370 switch (flags)
2371 {
2372 default:
2373 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2374
6e4879f0
MM
2375 case BTHR_STOP:
2376 return btrace_step_stopped_on_request ();
2377
52834460 2378 case BTHR_STEP:
d825d248
MM
2379 status = record_btrace_single_step_forward (tp);
2380 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2381 break;
52834460
MM
2382
2383 return btrace_step_stopped ();
2384
2385 case BTHR_RSTEP:
d825d248
MM
2386 status = record_btrace_single_step_backward (tp);
2387 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2388 break;
52834460
MM
2389
2390 return btrace_step_stopped ();
2391
2392 case BTHR_CONT:
e3cfc1c7
MM
2393 status = record_btrace_single_step_forward (tp);
2394 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2395 break;
52834460 2396
e3cfc1c7
MM
2397 btinfo->flags |= flags;
2398 return btrace_step_again ();
52834460
MM
2399
2400 case BTHR_RCONT:
e3cfc1c7
MM
2401 status = record_btrace_single_step_backward (tp);
2402 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2403 break;
52834460 2404
e3cfc1c7
MM
2405 btinfo->flags |= flags;
2406 return btrace_step_again ();
2407 }
d825d248 2408
e3cfc1c7
MM
2409 /* We keep threads moving at the end of their execution history. The to_wait
2410 method will stop the thread for whom the event is reported. */
2411 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2412 btinfo->flags |= flags;
52834460 2413
e3cfc1c7 2414 return status;
b2f4cfde
MM
2415}
2416
e3cfc1c7
MM
2417/* A vector of threads. */
2418
2419typedef struct thread_info * tp_t;
2420DEF_VEC_P (tp_t);
2421
a6b5be76
MM
2422/* Announce further events if necessary. */
2423
2424static void
53127008
SM
2425record_btrace_maybe_mark_async_event
2426 (const std::vector<thread_info *> &moving,
2427 const std::vector<thread_info *> &no_history)
a6b5be76 2428{
53127008
SM
2429 bool more_moving = !moving.empty ();
2430 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2431
2432 if (!more_moving && !more_no_history)
2433 return;
2434
2435 if (more_moving)
2436 DEBUG ("movers pending");
2437
2438 if (more_no_history)
2439 DEBUG ("no-history pending");
2440
2441 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2442}
2443
b2f4cfde
MM
2444/* The to_wait method of target record-btrace. */
2445
2446static ptid_t
2447record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2448 struct target_waitstatus *status, int options)
2449{
53127008
SM
2450 std::vector<thread_info *> moving;
2451 std::vector<thread_info *> no_history;
52834460
MM
2452
2453 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2454
b2f4cfde 2455 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2456 if ((execution_direction != EXEC_REVERSE)
2457 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2458 {
e75fdfca
TT
2459 ops = ops->beneath;
2460 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2461 }
2462
e3cfc1c7 2463 /* Keep a work list of moving threads. */
53127008
SM
2464 {
2465 thread_info *tp;
2466
2467 ALL_NON_EXITED_THREADS (tp)
2468 {
2469 if (ptid_match (tp->ptid, ptid)
2470 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2471 moving.push_back (tp);
2472 }
2473 }
e3cfc1c7 2474
53127008 2475 if (moving.empty ())
52834460 2476 {
e3cfc1c7 2477 *status = btrace_step_no_resumed ();
52834460 2478
e3cfc1c7 2479 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2480 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2481
e3cfc1c7 2482 return null_ptid;
52834460
MM
2483 }
2484
e3cfc1c7
MM
2485 /* Step moving threads one by one, one step each, until either one thread
2486 reports an event or we run out of threads to step.
2487
2488 When stepping more than one thread, chances are that some threads reach
2489 the end of their execution history earlier than others. If we reported
2490 this immediately, all-stop on top of non-stop would stop all threads and
2491 resume the same threads next time. And we would report the same thread
2492 having reached the end of its execution history again.
2493
2494 In the worst case, this would starve the other threads. But even if other
2495 threads would be allowed to make progress, this would result in far too
2496 many intermediate stops.
2497
2498 We therefore delay the reporting of "no execution history" until we have
2499 nothing else to report. By this time, all threads should have moved to
2500 either the beginning or the end of their execution history. There will
2501 be a single user-visible stop. */
53127008
SM
2502 struct thread_info *eventing = NULL;
2503 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2504 {
53127008 2505 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2506 {
53127008
SM
2507 thread_info *tp = moving[ix];
2508
e3cfc1c7
MM
2509 *status = record_btrace_step_thread (tp);
2510
2511 switch (status->kind)
2512 {
2513 case TARGET_WAITKIND_IGNORE:
2514 ix++;
2515 break;
2516
2517 case TARGET_WAITKIND_NO_HISTORY:
53127008 2518 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2519 break;
2520
2521 default:
53127008 2522 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2523 break;
2524 }
2525 }
2526 }
2527
2528 if (eventing == NULL)
2529 {
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2532
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
53127008 2535 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2536
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
53127008 2539 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2541
2542 *status = btrace_step_no_history ();
2543 }
2544
2545 gdb_assert (eventing != NULL);
2546
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2550
2551 /* Stop all other threads. */
5953356c 2552 if (!target_is_non_stop_p ())
53127008
SM
2553 {
2554 thread_info *tp;
2555
2556 ALL_NON_EXITED_THREADS (tp)
2557 record_btrace_cancel_resume (tp);
2558 }
52834460 2559
a6b5be76
MM
2560 /* In async mode, we need to announce further events. */
2561 if (target_is_async_p ())
2562 record_btrace_maybe_mark_async_event (moving, no_history);
2563
52834460 2564 /* Start record histories anew from the current position. */
e3cfc1c7 2565 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2566
2567 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2568 registers_changed_ptid (eventing->ptid);
2569
43792cf0
PA
2570 DEBUG ("wait ended by thread %s (%s): %s",
2571 print_thread_id (eventing),
e3cfc1c7 2572 target_pid_to_str (eventing->ptid),
23fdd69e 2573 target_waitstatus_to_string (status).c_str ());
52834460 2574
e3cfc1c7 2575 return eventing->ptid;
52834460
MM
2576}
2577
6e4879f0
MM
2578/* The to_stop method of target record-btrace. */
2579
2580static void
2581record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2582{
2583 DEBUG ("stop %s", target_pid_to_str (ptid));
2584
2585 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2586 if ((execution_direction != EXEC_REVERSE)
2587 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2588 {
2589 ops = ops->beneath;
2590 ops->to_stop (ops, ptid);
2591 }
2592 else
2593 {
2594 struct thread_info *tp;
2595
2596 ALL_NON_EXITED_THREADS (tp)
2597 if (ptid_match (tp->ptid, ptid))
2598 {
2599 tp->btrace.flags &= ~BTHR_MOVE;
2600 tp->btrace.flags |= BTHR_STOP;
2601 }
2602 }
2603 }
2604
52834460
MM
2605/* The to_can_execute_reverse method of target record-btrace. */
2606
2607static int
19db3e69 2608record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2609{
2610 return 1;
2611}
2612
9e8915c6 2613/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2614
9e8915c6
PA
2615static int
2616record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2617{
a52eab48 2618 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2619 {
2620 struct thread_info *tp = inferior_thread ();
2621
2622 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2623 }
2624
2625 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2626}
2627
2628/* The to_supports_stopped_by_sw_breakpoint method of target
2629 record-btrace. */
2630
2631static int
2632record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2633{
a52eab48 2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2635 return 1;
2636
2637 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2638}
2639
2640/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2641
2642static int
2643record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2644{
a52eab48 2645 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2646 {
2647 struct thread_info *tp = inferior_thread ();
2648
2649 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2650 }
2651
2652 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2653}
2654
2655/* The to_supports_stopped_by_hw_breakpoint method of target
2656 record-btrace. */
2657
2658static int
2659record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2660{
a52eab48 2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2662 return 1;
52834460 2663
9e8915c6 2664 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2665}
2666
e8032dde 2667/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2668
2669static void
e8032dde 2670record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2671{
e8032dde 2672 /* We don't add or remove threads during replay. */
a52eab48 2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2674 return;
2675
2676 /* Forward the request. */
e75fdfca 2677 ops = ops->beneath;
e8032dde 2678 ops->to_update_thread_list (ops);
e2887aa3
MM
2679}
2680
2681/* The to_thread_alive method of target record-btrace. */
2682
2683static int
2684record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2685{
2686 /* We don't add or remove threads during replay. */
a52eab48 2687 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2688 return find_thread_ptid (ptid) != NULL;
2689
2690 /* Forward the request. */
e75fdfca
TT
2691 ops = ops->beneath;
2692 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2693}
2694
066ce621
MM
2695/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2696 is stopped. */
2697
2698static void
2699record_btrace_set_replay (struct thread_info *tp,
2700 const struct btrace_insn_iterator *it)
2701{
2702 struct btrace_thread_info *btinfo;
2703
2704 btinfo = &tp->btrace;
2705
a0f1b963 2706 if (it == NULL)
52834460 2707 record_btrace_stop_replaying (tp);
066ce621
MM
2708 else
2709 {
2710 if (btinfo->replay == NULL)
52834460 2711 record_btrace_start_replaying (tp);
066ce621
MM
2712 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2713 return;
2714
2715 *btinfo->replay = *it;
52834460 2716 registers_changed_ptid (tp->ptid);
066ce621
MM
2717 }
2718
52834460
MM
2719 /* Start anew from the new replay position. */
2720 record_btrace_clear_histories (btinfo);
485668e5
MM
2721
2722 stop_pc = regcache_read_pc (get_current_regcache ());
2723 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2724}
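/* The "record goto" commands below (record_btrace_goto_begin,
   record_btrace_goto_end and record_btrace_goto) all funnel into
   record_btrace_set_replay.  */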
2725
2726/* The to_goto_record_begin method of target record-btrace. */
2727
2728static void
08475817 2729record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2730{
2731 struct thread_info *tp;
2732 struct btrace_insn_iterator begin;
2733
2734 tp = require_btrace_thread ();
2735
2736 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2737
2738 /* Skip gaps at the beginning of the trace. */
2739 while (btrace_insn_get (&begin) == NULL)
2740 {
2741 unsigned int steps;
2742
2743 steps = btrace_insn_next (&begin, 1);
2744 if (steps == 0)
2745 error (_("No trace."));
2746 }
2747
066ce621 2748 record_btrace_set_replay (tp, &begin);
066ce621
MM
2749}
2750
2751/* The to_goto_record_end method of target record-btrace. */
2752
2753static void
307a1b91 2754record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2755{
2756 struct thread_info *tp;
2757
2758 tp = require_btrace_thread ();
2759
2760 record_btrace_set_replay (tp, NULL);
066ce621
MM
2761}
2762
2763/* The to_goto_record method of target record-btrace. */
2764
2765static void
606183ac 2766record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2767{
2768 struct thread_info *tp;
2769 struct btrace_insn_iterator it;
2770 unsigned int number;
2771 int found;
2772
2773 number = insn;
2774
2775 /* Check for wrap-arounds. */
2776 if (number != insn)
2777 error (_("Instruction number out of range."));
2778
2779 tp = require_btrace_thread ();
2780
2781 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2782
2783 /* Check if the instruction could not be found or is a gap. */
2784 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2785 error (_("No such instruction."));
2786
2787 record_btrace_set_replay (tp, &it);
066ce621
MM
2788}
2789
797094dd
MM
2790/* The to_record_stop_replaying method of target record-btrace. */
2791
2792static void
2793record_btrace_stop_replaying_all (struct target_ops *self)
2794{
2795 struct thread_info *tp;
2796
2797 ALL_NON_EXITED_THREADS (tp)
2798 record_btrace_stop_replaying (tp);
2799}
2800
70ad5bff
MM
2801/* The to_execution_direction target method. */
2802
2803static enum exec_direction_kind
2804record_btrace_execution_direction (struct target_ops *self)
2805{
2806 return record_btrace_resume_exec_dir;
2807}
2808
aef92902
MM
2809/* The to_prepare_to_generate_core target method. */
2810
2811static void
2812record_btrace_prepare_to_generate_core (struct target_ops *self)
2813{
2814 record_btrace_generating_corefile = 1;
2815}
2816
2817/* The to_done_generating_core target method. */
2818
2819static void
2820record_btrace_done_generating_core (struct target_ops *self)
2821{
2822 record_btrace_generating_corefile = 0;
2823}
2824
afedecd3
MM
2825/* Initialize the record-btrace target ops. */
2826
2827static void
2828init_record_btrace_ops (void)
2829{
2830 struct target_ops *ops;
2831
2832 ops = &record_btrace_ops;
2833 ops->to_shortname = "record-btrace";
2834 ops->to_longname = "Branch tracing target";
2835 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2836 ops->to_open = record_btrace_open;
2837 ops->to_close = record_btrace_close;
b7d2e916 2838 ops->to_async = record_btrace_async;
afedecd3 2839 ops->to_detach = record_detach;
c0272db5 2840 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2841 ops->to_mourn_inferior = record_mourn_inferior;
2842 ops->to_kill = record_kill;
afedecd3
MM
2843 ops->to_stop_recording = record_btrace_stop_recording;
2844 ops->to_info_record = record_btrace_info;
2845 ops->to_insn_history = record_btrace_insn_history;
2846 ops->to_insn_history_from = record_btrace_insn_history_from;
2847 ops->to_insn_history_range = record_btrace_insn_history_range;
2848 ops->to_call_history = record_btrace_call_history;
2849 ops->to_call_history_from = record_btrace_call_history_from;
2850 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2851 ops->to_record_method = record_btrace_record_method;
07bbe694 2852 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2853 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2854 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2855 ops->to_xfer_partial = record_btrace_xfer_partial;
2856 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2857 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2858 ops->to_fetch_registers = record_btrace_fetch_registers;
2859 ops->to_store_registers = record_btrace_store_registers;
2860 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2861 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2862 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2863 ops->to_resume = record_btrace_resume;
85ad3aaf 2864 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2865 ops->to_wait = record_btrace_wait;
6e4879f0 2866 ops->to_stop = record_btrace_stop;
e8032dde 2867 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2868 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2869 ops->to_goto_record_begin = record_btrace_goto_begin;
2870 ops->to_goto_record_end = record_btrace_goto_end;
2871 ops->to_goto_record = record_btrace_goto;
52834460 2872 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2873 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2874 ops->to_supports_stopped_by_sw_breakpoint
2875 = record_btrace_supports_stopped_by_sw_breakpoint;
2876 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2877 ops->to_supports_stopped_by_hw_breakpoint
2878 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2879 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2880 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2881 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2882 ops->to_stratum = record_stratum;
2883 ops->to_magic = OPS_MAGIC;
2884}
2885
f4abbc16
MM
2886/* Start recording in BTS format. */
2887
2888static void
cdb34d4a 2889cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2890{
f4abbc16
MM
2891 if (args != NULL && *args != 0)
2892 error (_("Invalid argument."));
2893
2894 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895
492d29ea
PA
2896 TRY
2897 {
95a6b0a1 2898 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2899 }
2900 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2901 {
2902 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2903 throw_exception (exception);
2904 }
492d29ea 2905 END_CATCH
f4abbc16
MM
2906}
2907
bc504a31 2908/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2909
2910static void
cdb34d4a 2911cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2912{
2913 if (args != NULL && *args != 0)
2914 error (_("Invalid argument."));
2915
b20a6524 2916 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2917
492d29ea
PA
2918 TRY
2919 {
95a6b0a1 2920 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2921 }
2922 CATCH (exception, RETURN_MASK_ALL)
2923 {
2924 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2925 throw_exception (exception);
2926 }
2927 END_CATCH
afedecd3
MM
2928}
2929
b20a6524
MM
2930/* Alias for "target record". */
2931
2932static void
981a3fb3 2933cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2934{
2935 if (args != NULL && *args != 0)
2936 error (_("Invalid argument."));
2937
2938 record_btrace_conf.format = BTRACE_FORMAT_PT;
2939
2940 TRY
2941 {
95a6b0a1 2942 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2943 }
2944 CATCH (exception, RETURN_MASK_ALL)
2945 {
2946 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2947
2948 TRY
2949 {
95a6b0a1 2950 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2951 }
2952 CATCH (exception, RETURN_MASK_ALL)
2953 {
2954 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2955 throw_exception (exception);
2956 }
2957 END_CATCH
2958 }
2959 END_CATCH
2960}
2961
67b5c0c1
MM
2962/* The "set record btrace" command. */
2963
2964static void
981a3fb3 2965cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2966{
2967 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2968}
2969
2970/* The "show record btrace" command. */
2971
2972static void
981a3fb3 2973cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2974{
2975 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2976}
2977
2978/* The "show record btrace replay-memory-access" command. */
2979
2980static void
2981cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2982 struct cmd_list_element *c, const char *value)
2983{
2984 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2985 replay_memory_access);
2986}
2987
d33501a5
MM
2988/* The "set record btrace bts" command. */
2989
2990static void
981a3fb3 2991cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2992{
2993 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2994 "by an appropriate subcommand.\n"));
d33501a5
MM
2995 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2996 all_commands, gdb_stdout);
2997}
2998
2999/* The "show record btrace bts" command. */
3000
3001static void
981a3fb3 3002cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3003{
3004 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3005}
3006
b20a6524
MM
3007/* The "set record btrace pt" command. */
3008
3009static void
981a3fb3 3010cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3011{
3012 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3013 "by an appropriate subcommand.\n"));
3014 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3015 all_commands, gdb_stdout);
3016}
3017
3018/* The "show record btrace pt" command. */
3019
3020static void
981a3fb3 3021cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3022{
3023 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3024}
3025
3026/* The "record bts buffer-size" show value function. */
3027
3028static void
3029show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c,
3031 const char *value)
3032{
3033 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3034 value);
3035}
3036
3037/* The "record pt buffer-size" show value function. */
3038
3039static void
3040show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3041 struct cmd_list_element *c,
3042 const char *value)
3043{
3044 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3045 value);
3046}
3047
afedecd3
MM
3048/* Initialize btrace commands. */
3049
3050void
3051_initialize_record_btrace (void)
3052{
f4abbc16
MM
3053 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3054 _("Start branch trace recording."), &record_btrace_cmdlist,
3055 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3056 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3057
f4abbc16
MM
3058 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3059 _("\
3060Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3061The processor stores a from/to record for each branch into a cyclic buffer.\n\
3062This format may not be available on all processors."),
3063 &record_btrace_cmdlist);
3064 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3065
b20a6524
MM
3066 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3067 _("\
bc504a31 3068Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3069This format may not be available on all processors."),
3070 &record_btrace_cmdlist);
3071 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3072
67b5c0c1
MM
3073 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3074 _("Set record options"), &set_record_btrace_cmdlist,
3075 "set record btrace ", 0, &set_record_cmdlist);
3076
3077 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3078 _("Show record options"), &show_record_btrace_cmdlist,
3079 "show record btrace ", 0, &show_record_cmdlist);
3080
3081 add_setshow_enum_cmd ("replay-memory-access", no_class,
3082 replay_memory_access_types, &replay_memory_access, _("\
3083Set what memory accesses are allowed during replay."), _("\
3084Show what memory accesses are allowed during replay."),
3085 _("Default is READ-ONLY.\n\n\
3086The btrace record target does not trace data.\n\
3087The memory therefore corresponds to the live target and not \
3088to the current replay position.\n\n\
3089When READ-ONLY, allow accesses to read-only memory during replay.\n\
3090When READ-WRITE, allow accesses to read-only and read-write memory during \
3091replay."),
3092 NULL, cmd_show_replay_memory_access,
3093 &set_record_btrace_cmdlist,
3094 &show_record_btrace_cmdlist);
3095
d33501a5
MM
3096 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3097 _("Set record btrace bts options"),
3098 &set_record_btrace_bts_cmdlist,
3099 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3100
3101 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3102 _("Show record btrace bts options"),
3103 &show_record_btrace_bts_cmdlist,
3104 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3105
3106 add_setshow_uinteger_cmd ("buffer-size", no_class,
3107 &record_btrace_conf.bts.size,
3108 _("Set the record/replay bts buffer size."),
3109 _("Show the record/replay bts buffer size."), _("\
3110When starting recording, request a trace buffer of this size. \
3111The actual buffer size may differ from the requested size. \
3112Use \"info record\" to see the actual buffer size.\n\n\
3113Bigger buffers allow longer recording but also take more time to process \
3114the recorded execution trace.\n\n\
b20a6524
MM
3115The trace buffer size may not be changed while recording."), NULL,
3116 show_record_bts_buffer_size_value,
d33501a5
MM
3117 &set_record_btrace_bts_cmdlist,
3118 &show_record_btrace_bts_cmdlist);
3119
b20a6524
MM
3120 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3121 _("Set record btrace pt options"),
3122 &set_record_btrace_pt_cmdlist,
3123 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3124
3125 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3126 _("Show record btrace pt options"),
3127 &show_record_btrace_pt_cmdlist,
3128 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3129
3130 add_setshow_uinteger_cmd ("buffer-size", no_class,
3131 &record_btrace_conf.pt.size,
3132 _("Set the record/replay pt buffer size."),
3133 _("Show the record/replay pt buffer size."), _("\
3134Bigger buffers allow longer recording but also take more time to process \
3135the recorded execution.\n\
3136The actual buffer size may differ from the requested size. Use \"info record\" \
3137to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3138 &set_record_btrace_pt_cmdlist,
3139 &show_record_btrace_pt_cmdlist);
3140
afedecd3
MM
3141 init_record_btrace_ops ();
3142 add_target (&record_btrace_ops);
0b722aec
MM
3143
3144 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3145 xcalloc, xfree);
d33501a5
MM
3146
3147 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3148 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3149}
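/* Illustrative usage of the commands registered above; a sketch only, since
   availability of the BTS and PT formats depends on the processor:

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) info record

   The buffer size must be set before recording is started; see the help
   text above.  */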