btrace: identify cpu
[deliverable/binutils-gdb.git] / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  Controlled by the "record" debug setting
   (RECORD_DEBUG); messages go to gdb's log stream.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

/* Like DEBUG, but additionally tagged "[ftrace]" for messages about the
   function-level trace computation.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52 static const char *
53 ftrace_print_function_name (const struct btrace_function *bfun)
54 {
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
65 return MSYMBOL_PRINT_NAME (msym);
66
67 return "<unknown>";
68 }
69
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73 static const char *
74 ftrace_print_filename (const struct btrace_function *bfun)
75 {
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
83 else
84 filename = "<unknown>";
85
86 return filename;
87 }
88
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
94 {
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
99 }
100
101 /* Print an ftrace debug status message. */
102
103 static void
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 {
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 lbegin = bfun->lbegin;
115 lend = bfun->lend;
116
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
122 ibegin, iend);
123 }
124
125 /* Return non-zero if BFUN does not match MFUN and FUN,
126 return zero otherwise. */
127
128 static int
129 ftrace_function_switched (const struct btrace_function *bfun,
130 const struct minimal_symbol *mfun,
131 const struct symbol *fun)
132 {
133 struct minimal_symbol *msym;
134 struct symbol *sym;
135
136 msym = bfun->msym;
137 sym = bfun->sym;
138
139 /* If the minimal symbol changed, we certainly switched functions. */
140 if (mfun != NULL && msym != NULL
141 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
142 return 1;
143
144 /* If the symbol changed, we certainly switched functions. */
145 if (fun != NULL && sym != NULL)
146 {
147 const char *bfname, *fname;
148
149 /* Check the function name. */
150 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
151 return 1;
152
153 /* Check the location of those functions, as well. */
154 bfname = symtab_to_fullname (symbol_symtab (sym));
155 fname = symtab_to_fullname (symbol_symtab (fun));
156 if (filename_cmp (fname, bfname) != 0)
157 return 1;
158 }
159
160 /* If we lost symbol information, we switched functions. */
161 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
162 return 1;
163
164 /* If we gained symbol information, we switched functions. */
165 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
166 return 1;
167
168 return 0;
169 }
170
171 /* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
175
176 static int
177 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
178 {
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
183 if (sym == NULL)
184 return 1;
185
186 bfile = symtab_to_fullname (symbol_symtab (sym));
187
188 return (filename_cmp (bfile, fullname) != 0);
189 }
190
191 /* Allocate and initialize a new branch trace function segment.
192 PREV is the chronologically preceding function segment.
193 MFUN and FUN are the symbol information we have for this function. */
194
195 static struct btrace_function *
196 ftrace_new_function (struct btrace_function *prev,
197 struct minimal_symbol *mfun,
198 struct symbol *fun)
199 {
200 struct btrace_function *bfun;
201
202 bfun = xzalloc (sizeof (*bfun));
203
204 bfun->msym = mfun;
205 bfun->sym = fun;
206 bfun->flow.prev = prev;
207
208 /* We start with the identities of min and max, respectively. */
209 bfun->lbegin = INT_MAX;
210 bfun->lend = INT_MIN;
211
212 if (prev == NULL)
213 {
214 /* Start counting at one. */
215 bfun->number = 1;
216 bfun->insn_offset = 1;
217 }
218 else
219 {
220 gdb_assert (prev->flow.next == NULL);
221 prev->flow.next = bfun;
222
223 bfun->number = prev->number + 1;
224 bfun->insn_offset = (prev->insn_offset
225 + VEC_length (btrace_insn_s, prev->insn));
226 }
227
228 return bfun;
229 }
230
231 /* Update the UP field of a function segment. */
232
233 static void
234 ftrace_update_caller (struct btrace_function *bfun,
235 struct btrace_function *caller,
236 enum btrace_function_flag flags)
237 {
238 if (bfun->up != NULL)
239 ftrace_debug (bfun, "updating caller");
240
241 bfun->up = caller;
242 bfun->flags = flags;
243
244 ftrace_debug (bfun, "set caller");
245 }
246
247 /* Fix up the caller for all segments of a function. */
248
249 static void
250 ftrace_fixup_caller (struct btrace_function *bfun,
251 struct btrace_function *caller,
252 enum btrace_function_flag flags)
253 {
254 struct btrace_function *prev, *next;
255
256 ftrace_update_caller (bfun, caller, flags);
257
258 /* Update all function segments belonging to the same function. */
259 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
260 ftrace_update_caller (prev, caller, flags);
261
262 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
263 ftrace_update_caller (next, caller, flags);
264 }
265
266 /* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
269
270 static struct btrace_function *
271 ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
273 struct symbol *fun)
274 {
275 struct btrace_function *bfun;
276
277 bfun = ftrace_new_function (caller, mfun, fun);
278 bfun->up = caller;
279 bfun->level = caller->level + 1;
280
281 ftrace_debug (bfun, "new call");
282
283 return bfun;
284 }
285
286 /* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
289
290 static struct btrace_function *
291 ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
293 struct symbol *fun)
294 {
295 struct btrace_function *bfun;
296
297 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->up = caller;
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301
302 ftrace_debug (bfun, "new tail call");
303
304 return bfun;
305 }
306
307 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
309
310 static struct btrace_function *
311 ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
313 struct symbol *fun)
314 {
315 for (; bfun != NULL; bfun = bfun->up)
316 {
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
319 continue;
320
321 /* This is the function segment we're looking for. */
322 break;
323 }
324
325 return bfun;
326 }
327
328 /* Find the innermost caller in the back trace of BFUN, skipping all
329 function segments that do not end with a call instruction (e.g.
330 tail calls ending with a jump). */
331
332 static struct btrace_function *
333 ftrace_find_call (struct btrace_function *bfun)
334 {
335 for (; bfun != NULL; bfun = bfun->up)
336 {
337 struct btrace_insn *last;
338
339 /* We do not allow empty function segments. */
340 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
341
342 last = VEC_last (btrace_insn_s, bfun->insn);
343
344 if (last->iclass == BTRACE_INSN_CALL)
345 break;
346 }
347
348 return bfun;
349 }
350
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Links the new segment to the matching earlier segment of the same
   function when one can be found, and patches up levels and the call
   stack when it cannot.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      /* Link the two segments of the same function instance.  */
      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
425
426 /* Add a new function segment for a function switch.
427 PREV is the chronologically preceding function segment.
428 MFUN and FUN are the symbol information we have for this function. */
429
430 static struct btrace_function *
431 ftrace_new_switch (struct btrace_function *prev,
432 struct minimal_symbol *mfun,
433 struct symbol *fun)
434 {
435 struct btrace_function *bfun;
436
437 /* This is an unexplained function switch. The call stack will likely
438 be wrong at this point. */
439 bfun = ftrace_new_function (prev, mfun, fun);
440
441 /* We keep the function level. */
442 bfun->level = prev->level;
443
444 ftrace_debug (bfun, "new switch");
445
446 return bfun;
447 }
448
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  return ftrace_new_return (bfun, mfun, fun);

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call.  */
	    if (start == 0 || start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    /* Otherwise fall out of the switch: an in-function jump is not
	       a segment boundary by itself.  */
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
523
524 /* Update BFUN's source range with respect to the instruction at PC. */
525
526 static void
527 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
528 {
529 struct symtab_and_line sal;
530 const char *fullname;
531
532 sal = find_pc_line (pc, 0);
533 if (sal.symtab == NULL || sal.line == 0)
534 {
535 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
536 return;
537 }
538
539 /* Check if we switched files. This could happen if, say, a macro that
540 is defined in another file is expanded here. */
541 fullname = symtab_to_fullname (sal.symtab);
542 if (ftrace_skip_file (bfun, fullname))
543 {
544 DEBUG_FTRACE ("ignoring file at %s, file=%s",
545 core_addr_to_string_nz (pc), fullname);
546 return;
547 }
548
549 /* Update the line range. */
550 bfun->lbegin = min (bfun->lbegin, sal.line);
551 bfun->lend = max (bfun->lend, sal.line);
552
553 if (record_debug > 1)
554 ftrace_debug (bfun, "update lines");
555 }
556
/* Append the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
568
/* Classify the instruction at PC as call, return, jump, or other.
   Decoding errors leave the classification at BTRACE_INSN_OTHER.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  volatile struct gdb_exception error;
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  /* Swallow errors from the gdbarch decoders; IClass stays OTHER.  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }

  return iclass;
}
590
/* Compute the function branch trace from BTS trace.
   TP is the thread whose btrace info is extended; BTRACE holds the raw
   block trace read from the target.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  /* LEVEL tracks the minimal function level seen so far.  When extending
     an existing trace, resume from the stored (negated) offset; otherwise
     start at the identity of min.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  /* The block vector stores the most recent block first, so iterate from
     the back to process blocks in chronological order.  */
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      /* Walk the instructions from the block's begin to its end.  */
      for (;;)
	{
	  volatile struct gdb_exception error;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  /* Disassembly errors leave SIZE at zero; handled further down.  */
	  size = 0;
	  TRY_CATCH (error, RETURN_MASK_ERROR)
	    size = gdb_insn_length (gdbarch, pc);

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);

	  ftrace_update_insns (end, &insn);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
687
688 /* Compute the function branch trace from a block branch trace BTRACE for
689 a thread given by BTINFO. */
690
691 static void
692 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
693 {
694 DEBUG ("compute ftrace");
695
696 switch (btrace->format)
697 {
698 case BTRACE_FORMAT_NONE:
699 return;
700
701 case BTRACE_FORMAT_BTS:
702 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
703 return;
704 }
705
706 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
707 }
708
/* Add an entry for the current PC of thread TP, so tracing starts from
   the point where it was enabled.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  /* Build a minimal BTS trace containing a single block [pc; pc].  */
  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Make sure the trace data is freed even if computing the ftrace
     throws.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
737
738 /* See btrace.h. */
739
740 void
741 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
742 {
743 if (tp->btrace.target != NULL)
744 return;
745
746 if (!target_supports_btrace (conf->format))
747 error (_("Target does not support branch tracing."));
748
749 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
750
751 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
752
753 /* Add an entry for the current PC so we start tracing from where we
754 enabled it. */
755 if (tp->btrace.target != NULL)
756 btrace_add_pc (tp);
757 }
758
759 /* See btrace.h. */
760
761 const struct btrace_config *
762 btrace_conf (const struct btrace_thread_info *btinfo)
763 {
764 if (btinfo->target == NULL)
765 return NULL;
766
767 return target_btrace_conf (btinfo->target);
768 }
769
770 /* See btrace.h. */
771
772 void
773 btrace_disable (struct thread_info *tp)
774 {
775 struct btrace_thread_info *btp = &tp->btrace;
776 int errcode = 0;
777
778 if (btp->target == NULL)
779 return;
780
781 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
782
783 target_disable_btrace (btp->target);
784 btp->target = NULL;
785
786 btrace_clear (tp);
787 }
788
789 /* See btrace.h. */
790
791 void
792 btrace_teardown (struct thread_info *tp)
793 {
794 struct btrace_thread_info *btp = &tp->btrace;
795 int errcode = 0;
796
797 if (btp->target == NULL)
798 return;
799
800 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
801
802 target_teardown_btrace (btp->target);
803 btp->target = NULL;
804
805 btrace_clear (tp);
806 }
807
/* Stitch branch trace in BTS format.
   BTRACE is the newly read delta trace; BTINFO holds the trace so far.
   Return 0 on success, -1 to request a full re-read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace,
		   const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
874
875 /* Adjust the block trace in order to stitch old and new trace together.
876 BTRACE is the new delta trace between the last and the current stop.
877 BTINFO is the old branch trace until the last stop.
878 May modifx BTRACE as well as the existing trace in BTINFO.
879 Return 0 on success, -1 otherwise. */
880
881 static int
882 btrace_stitch_trace (struct btrace_data *btrace,
883 const struct btrace_thread_info *btinfo)
884 {
885 /* If we don't have trace, there's nothing to do. */
886 if (btrace_data_empty (btrace))
887 return 0;
888
889 switch (btrace->format)
890 {
891 case BTRACE_FORMAT_NONE:
892 return 0;
893
894 case BTRACE_FORMAT_BTS:
895 return btrace_stitch_bts (&btrace->variant.bts, btinfo);
896 }
897
898 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
899 }
900
901 /* Clear the branch trace histories in BTINFO. */
902
903 static void
904 btrace_clear_history (struct btrace_thread_info *btinfo)
905 {
906 xfree (btinfo->insn_history);
907 xfree (btinfo->call_history);
908 xfree (btinfo->replay);
909
910 btinfo->insn_history = NULL;
911 btinfo->call_history = NULL;
912 btinfo->replay = NULL;
913 }
914
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Nothing to fetch if recording is not enabled for this thread.  */
  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* The cleanup frees the raw trace data on every exit path.  */
  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
984
985 /* See btrace.h. */
986
987 void
988 btrace_clear (struct thread_info *tp)
989 {
990 struct btrace_thread_info *btinfo;
991 struct btrace_function *it, *trash;
992
993 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
994
995 /* Make sure btrace frames that may hold a pointer into the branch
996 trace data are destroyed. */
997 reinit_frame_cache ();
998
999 btinfo = &tp->btrace;
1000
1001 it = btinfo->begin;
1002 while (it != NULL)
1003 {
1004 trash = it;
1005 it = it->flow.next;
1006
1007 xfree (trash);
1008 }
1009
1010 btinfo->begin = NULL;
1011 btinfo->end = NULL;
1012
1013 btrace_clear_history (btinfo);
1014 }
1015
1016 /* See btrace.h. */
1017
1018 void
1019 btrace_free_objfile (struct objfile *objfile)
1020 {
1021 struct thread_info *tp;
1022
1023 DEBUG ("free objfile");
1024
1025 ALL_NON_EXITED_THREADS (tp)
1026 btrace_clear (tp);
1027 }
1028
1029 #if defined (HAVE_LIBEXPAT)
1030
1031 /* Check the btrace document version. */
1032
1033 static void
1034 check_xml_btrace_version (struct gdb_xml_parser *parser,
1035 const struct gdb_xml_element *element,
1036 void *user_data, VEC (gdb_xml_value_s) *attributes)
1037 {
1038 const char *version = xml_find_attribute (attributes, "version")->value;
1039
1040 if (strcmp (version, "1.0") != 0)
1041 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1042 }
1043
1044 /* Parse a btrace "block" xml record. */
1045
1046 static void
1047 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1048 const struct gdb_xml_element *element,
1049 void *user_data, VEC (gdb_xml_value_s) *attributes)
1050 {
1051 struct btrace_data *btrace;
1052 struct btrace_block *block;
1053 ULONGEST *begin, *end;
1054
1055 btrace = user_data;
1056
1057 switch (btrace->format)
1058 {
1059 case BTRACE_FORMAT_BTS:
1060 break;
1061
1062 case BTRACE_FORMAT_NONE:
1063 btrace->format = BTRACE_FORMAT_BTS;
1064 btrace->variant.bts.blocks = NULL;
1065 break;
1066
1067 default:
1068 gdb_xml_error (parser, _("Btrace format error."));
1069 }
1070
1071 begin = xml_find_attribute (attributes, "begin")->value;
1072 end = xml_find_attribute (attributes, "end")->value;
1073
1074 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1075 block->begin = *begin;
1076 block->end = *end;
1077 }
1078
/* Attributes of a btrace "block" xml record.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" xml element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": any number of "block" records.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The top-level element of a btrace xml document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1101
1102 #endif /* defined (HAVE_LIBEXPAT) */
1103
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Arrange for BTRACE to be freed if parsing fails.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}
1131
1132 #if defined (HAVE_LIBEXPAT)
1133
1134 /* Parse a btrace-conf "bts" xml record. */
1135
1136 static void
1137 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1138 const struct gdb_xml_element *element,
1139 void *user_data, VEC (gdb_xml_value_s) *attributes)
1140 {
1141 struct btrace_config *conf;
1142 struct gdb_xml_value *size;
1143
1144 conf = user_data;
1145 conf->format = BTRACE_FORMAT_BTS;
1146 conf->bts.size = 0;
1147
1148 size = xml_find_attribute (attributes, "size");
1149 if (size != NULL)
1150 conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
1151 }
1152
/* Attributes of a btrace-conf "bts" xml record: an optional "size"
   (requested trace buffer size), parsed as ULONGEST.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1157
/* Child elements of a "btrace-conf" xml record: an optional "bts"
   record handled by parse_xml_btrace_conf_bts.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1163
/* Attributes of the top-level "btrace-conf" xml record.  Only a
   mandatory "version" attribute, which is not further validated here.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1168
/* Top-level element table for parsing a branch trace configuration xml
   document, as consumed by gdb_xml_parse_quick in parse_xml_btrace_conf.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1174
1175 #endif /* defined (HAVE_LIBEXPAT) */
1176
1177 /* See btrace.h. */
1178
/* Parse the branch trace configuration xml document in XML into CONF.
   Errors out if parsing fails or if XML support is not compiled in.

   Note: ERRCODE is declared inside the HAVE_LIBEXPAT region so that
   non-expat builds do not emit an unused-variable warning.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)
  int errcode;

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1197
1198 /* See btrace.h. */
1199
1200 const struct btrace_insn *
1201 btrace_insn_get (const struct btrace_insn_iterator *it)
1202 {
1203 const struct btrace_function *bfun;
1204 unsigned int index, end;
1205
1206 index = it->index;
1207 bfun = it->function;
1208
1209 /* The index is within the bounds of this function's instruction vector. */
1210 end = VEC_length (btrace_insn_s, bfun->insn);
1211 gdb_assert (0 < end);
1212 gdb_assert (index < end);
1213
1214 return VEC_index (btrace_insn_s, bfun->insn, index);
1215 }
1216
1217 /* See btrace.h. */
1218
1219 unsigned int
1220 btrace_insn_number (const struct btrace_insn_iterator *it)
1221 {
1222 const struct btrace_function *bfun;
1223
1224 bfun = it->function;
1225 return bfun->insn_offset + it->index;
1226 }
1227
1228 /* See btrace.h. */
1229
1230 void
1231 btrace_insn_begin (struct btrace_insn_iterator *it,
1232 const struct btrace_thread_info *btinfo)
1233 {
1234 const struct btrace_function *bfun;
1235
1236 bfun = btinfo->begin;
1237 if (bfun == NULL)
1238 error (_("No trace."));
1239
1240 it->function = bfun;
1241 it->index = 0;
1242 }
1243
1244 /* See btrace.h. */
1245
1246 void
1247 btrace_insn_end (struct btrace_insn_iterator *it,
1248 const struct btrace_thread_info *btinfo)
1249 {
1250 const struct btrace_function *bfun;
1251 unsigned int length;
1252
1253 bfun = btinfo->end;
1254 if (bfun == NULL)
1255 error (_("No trace."));
1256
1257 /* The last instruction in the last function is the current instruction.
1258 We point to it - it is one past the end of the execution trace. */
1259 length = VEC_length (btrace_insn_s, bfun->insn);
1260
1261 it->function = bfun;
1262 it->index = length - 1;
1263 }
1264
1265 /* See btrace.h. */
1266
unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Cross function-segment boundaries via the FLOW.NEXT links until we
     have taken STRIDE steps or run out of trace.  Returns the number of
     instructions actually stepped, which may be less than STRIDE.  */
  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* A function segment is never empty, and the iterator index is
	 always within bounds.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  ADV was at least one here, so STEPS
		 stays non-negative after the adjustment.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1327
1328 /* See btrace.h. */
1329
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Step backwards, crossing function-segment boundaries via the
     FLOW.PREV links, until we have taken STRIDE steps or reached the
     beginning of the trace.  Returns the number of instructions actually
     stepped, which may be less than STRIDE.  */
  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1377
1378 /* See btrace.h. */
1379
/* Compare the positions of two instruction iterators.  Returns a
   negative value if LHS precedes RHS, zero if they are equal, and a
   positive value if LHS follows RHS.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* Compare instead of subtracting: casting the unsigned difference to
     int yields the wrong sign once the distance exceeds INT_MAX.
     Callers depend only on the sign of the result.  */
  return (lnum > rnum) - (lnum < rnum);
}
1391
1392 /* See btrace.h. */
1393
1394 int
1395 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1396 const struct btrace_thread_info *btinfo,
1397 unsigned int number)
1398 {
1399 const struct btrace_function *bfun;
1400 unsigned int end;
1401
1402 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1403 if (bfun->insn_offset <= number)
1404 break;
1405
1406 if (bfun == NULL)
1407 return 0;
1408
1409 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1410 if (end <= number)
1411 return 0;
1412
1413 it->function = bfun;
1414 it->index = number - bfun->insn_offset;
1415
1416 return 1;
1417 }
1418
1419 /* See btrace.h. */
1420
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* For the end iterator (see btrace_call_end) this is NULL.  */
  return it->function;
}
1426
1427 /* See btrace.h. */
1428
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  /* A non-NULL function segment carries its own number.  */
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  This mirrors the skipping done in btrace_call_next.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1455
1456 /* See btrace.h. */
1457
1458 void
1459 btrace_call_begin (struct btrace_call_iterator *it,
1460 const struct btrace_thread_info *btinfo)
1461 {
1462 const struct btrace_function *bfun;
1463
1464 bfun = btinfo->begin;
1465 if (bfun == NULL)
1466 error (_("No trace."));
1467
1468 it->btinfo = btinfo;
1469 it->function = bfun;
1470 }
1471
1472 /* See btrace.h. */
1473
1474 void
1475 btrace_call_end (struct btrace_call_iterator *it,
1476 const struct btrace_thread_info *btinfo)
1477 {
1478 const struct btrace_function *bfun;
1479
1480 bfun = btinfo->end;
1481 if (bfun == NULL)
1482 error (_("No trace."));
1483
1484 it->btinfo = btinfo;
1485 it->function = NULL;
1486 }
1487
1488 /* See btrace.h. */
1489
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  /* Walk forward along the FLOW.NEXT links until we have taken STRIDE
     steps or reached the end iterator (BFUN == NULL).  Returns the
     number of steps actually taken.  */
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  The decrement cancels the
	     STEPS increment below, so the skipped segment is not counted.
	     NOTE(review): when STEPS is zero this relies on unsigned
	     wrap-around being undone by that same increment.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1523
1524 /* See btrace.h. */
1525
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* If IT is the end iterator, first step back onto the last real
     function segment.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  This mirrors the skipping
	 done in btrace_call_next.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  /* Walk backwards along the FLOW.PREV links until we have taken STRIDE
     steps or reached the beginning of the trace.  */
  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1572
1573 /* See btrace.h. */
1574
/* Compare the positions of two call iterators.  Returns a negative
   value if LHS precedes RHS, zero if they are equal, and a positive
   value if LHS follows RHS.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  /* Compare instead of subtracting: casting the unsigned difference to
     int yields the wrong sign once the distance exceeds INT_MAX.
     Callers depend only on the sign of the result.  */
  return (lnum > rnum) - (lnum < rnum);
}
1586
1587 /* See btrace.h. */
1588
1589 int
1590 btrace_find_call_by_number (struct btrace_call_iterator *it,
1591 const struct btrace_thread_info *btinfo,
1592 unsigned int number)
1593 {
1594 const struct btrace_function *bfun;
1595
1596 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1597 {
1598 unsigned int bnum;
1599
1600 bnum = bfun->number;
1601 if (number == bnum)
1602 {
1603 it->btinfo = btinfo;
1604 it->function = bfun;
1605 return 1;
1606 }
1607
1608 /* Functions are ordered and numbered consecutively. We could bail out
1609 earlier. On the other hand, it is very unlikely that we search for
1610 a nonexistent function. */
1611 }
1612
1613 return 0;
1614 }
1615
1616 /* See btrace.h. */
1617
void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  /* Record the [BEGIN, END) range by value.  */
  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1629
1630 /* See btrace.h. */
1631
void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must refer to the same branch trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  /* Record the [BEGIN, END) range by value.  */
  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
1645
1646 /* See btrace.h. */
1647
int
btrace_is_replaying (struct thread_info *tp)
{
  /* A non-NULL replay iterator indicates TP is replaying its trace.  */
  return tp->btrace.replay != NULL;
}
1653
1654 /* See btrace.h. */
1655
1656 int
1657 btrace_is_empty (struct thread_info *tp)
1658 {
1659 struct btrace_insn_iterator begin, end;
1660 struct btrace_thread_info *btinfo;
1661
1662 btinfo = &tp->btrace;
1663
1664 if (btinfo->begin == NULL)
1665 return 1;
1666
1667 btrace_insn_begin (&begin, btinfo);
1668 btrace_insn_end (&end, btinfo);
1669
1670 return btrace_insn_cmp (&begin, &end) == 0;
1671 }
1672
1673 /* Forward the cleanup request. */
1674
static void
do_btrace_data_cleanup (void *arg)
{
  /* ARG is the struct btrace_data registered by make_cleanup_btrace_data;
     release its resources.  */
  btrace_data_fini (arg);
}
1680
1681 /* See btrace.h. */
1682
struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  /* The returned cleanup releases DATA via btrace_data_fini when run;
     discard it (see parse_xml_btrace) to keep the data.  */
  return make_cleanup (do_btrace_data_cleanup, data);
}
This page took 0.064795 seconds and 4 git commands to generate.