change minsym representation
[deliverable/binutils-gdb.git] / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "btrace.h"
23 #include "gdbthread.h"
24 #include "exceptions.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34
35 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
36 when used in if statements. */
37
38 #define DEBUG(msg, args...) \
39 do \
40 { \
41 if (record_debug != 0) \
42 fprintf_unfiltered (gdb_stdlog, \
43 "[btrace] " msg "\n", ##args); \
44 } \
45 while (0)
46
47 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52 static const char *
53 ftrace_print_function_name (const struct btrace_function *bfun)
54 {
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
65 return MSYMBOL_PRINT_NAME (msym);
66
67 return "<unknown>";
68 }
69
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73 static const char *
74 ftrace_print_filename (const struct btrace_function *bfun)
75 {
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
82 filename = symtab_to_filename_for_display (sym->symtab);
83 else
84 filename = "<unknown>";
85
86 return filename;
87 }
88
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
94 {
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
99 }
100
101 /* Print an ftrace debug status message. */
102
103 static void
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 {
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 lbegin = bfun->lbegin;
115 lend = bfun->lend;
116
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
122 ibegin, iend);
123 }
124
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  MFUN and FUN are the minimal and full symbol
   for the current PC; BFUN carries the symbols recorded for the current
   function segment.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.
     Compare by linkage name rather than by pointer so symbols read from
     different places still match.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Two identically
	 named static functions in different files must not match.  */
      bfname = symtab_to_fullname (sym->symtab);
      fname = symtab_to_fullname (fun->symtab);
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
170
171 /* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
175
176 static int
177 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
178 {
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
183 if (sym == NULL)
184 return 1;
185
186 bfile = symtab_to_fullname (sym->symtab);
187
188 return (filename_cmp (bfile, fullname) != 0);
189 }
190
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   The returned segment is linked into PREV's flow chain and assigned
   the next segment number and instruction offset.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  /* xzalloc zero-initializes all links (flow, segment, up) and flags.  */
  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively, so the
     first ftrace_update_lines call establishes the real range.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      /* PREV must be the chronologically last segment so far.  */
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}
230
/* Update the UP field of a function segment.
   BFUN's caller link is set to CALLER, and its up-link flags (e.g. whether
   the link represents a tail call or a return) are replaced by FLAGS.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  /* Overwriting an existing caller is unusual; note it in debug output.  */
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}
246
/* Fix up the caller for all segments of a function.
   Sets CALLER and FLAGS on BFUN and on every other segment belonging to
   the same function instance, both earlier and later in the segment
   chain.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}
265
266 /* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
269
270 static struct btrace_function *
271 ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
273 struct symbol *fun)
274 {
275 struct btrace_function *bfun;
276
277 bfun = ftrace_new_function (caller, mfun, fun);
278 bfun->up = caller;
279 bfun->level = caller->level + 1;
280
281 ftrace_debug (bfun, "new call");
282
283 return bfun;
284 }
285
286 /* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
289
290 static struct btrace_function *
291 ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
293 struct symbol *fun)
294 {
295 struct btrace_function *bfun;
296
297 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->up = caller;
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301
302 ftrace_debug (bfun, "new tail call");
303
304 return bfun;
305 }
306
307 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
309
310 static struct btrace_function *
311 ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
313 struct symbol *fun)
314 {
315 for (; bfun != NULL; bfun = bfun->up)
316 {
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
319 continue;
320
321 /* This is the function segment we're looking for. */
322 break;
323 }
324
325 return bfun;
326 }
327
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  Return NULL if there is none.  */

static struct btrace_function *
ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;
      CORE_ADDR pc;

      /* We do not allow empty function segments.  */
      gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));

      /* Only the segment's last instruction can be the call that left
	 this function.  */
      last = VEC_last (btrace_insn_s, bfun->insn);
      pc = last->pc;

      if (gdbarch_insn_is_call (gdbarch, pc))
	break;
    }

  return bfun;
}
352
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.

   The new segment is linked to the segment of the function we return into
   (if we can find it) so a function split by calls can be viewed as one
   instance; otherwise the function level is estimated.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV: every segment of that function
	     now links up to the new segment via a return edge.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
428
429 /* Add a new function segment for a function switch.
430 PREV is the chronologically preceding function segment.
431 MFUN and FUN are the symbol information we have for this function. */
432
433 static struct btrace_function *
434 ftrace_new_switch (struct btrace_function *prev,
435 struct minimal_symbol *mfun,
436 struct symbol *fun)
437 {
438 struct btrace_function *bfun;
439
440 /* This is an unexplained function switch. The call stack will likely
441 be wrong at this point. */
442 bfun = ftrace_new_function (prev, mfun, fun);
443
444 /* We keep the function level. */
445 bfun->level = prev->level;
446
447 ftrace_debug (bfun, "new switch");
448
449 return bfun;
450 }
451
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.
   The decision between call/return/tail call/switch is based on the type
   of the previous instruction and on the symbol information at PC.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      /* Note: LAST may be NULL here; ftrace_print_insn_addr handles that
	 and prints "<nil>".  */
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps to the start of a function indicate optimized tail
	     calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Still in the same function; keep the current segment.  */
  return bfun;
}
540
/* Update BFUN's source range with respect to the instruction at PC.
   Widens [lbegin; lend] to include the source line of PC, ignoring
   instructions without line information or from other files.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
		    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}
573
/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct btrace_insn *insn;

  /* Push an empty slot onto the vector and fill in the address.  */
  insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
  insn->pc = pc;

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
587
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  Appends to any existing function trace in
   BTINFO and updates BTINFO's global level offset.

   Note that BTRACE stores blocks in reverse chronological order, so we
   iterate from the last vector element to the first.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
		       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  /* Resume from the stored level offset if we already have trace;
     otherwise start from the identity of min.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      /* Disassemble the block one instruction at a time.  */
      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
677
/* Add an entry for the current PC of thread TP's branch trace.
   Builds a single-instruction block [pc; pc] and feeds it through
   btrace_compute_ftrace so tracing starts at the enable point.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace = NULL;
  /* Make sure the temporary block vector is freed on any exit path.  */
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  block = VEC_safe_push (btrace_block_s, btrace, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (&tp->btrace, btrace);

  do_cleanups (cleanup);
}
703
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp)
{
  /* Tracing is already enabled for this thread; nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
724
725 /* See btrace.h. */
726
727 void
728 btrace_disable (struct thread_info *tp)
729 {
730 struct btrace_thread_info *btp = &tp->btrace;
731 int errcode = 0;
732
733 if (btp->target == NULL)
734 return;
735
736 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
737
738 target_disable_btrace (btp->target);
739 btp->target = NULL;
740
741 btrace_clear (tp);
742 }
743
744 /* See btrace.h. */
745
746 void
747 btrace_teardown (struct thread_info *tp)
748 {
749 struct btrace_thread_info *btp = &tp->btrace;
750 int errcode = 0;
751
752 if (btp->target == NULL)
753 return;
754
755 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
756
757 target_teardown_btrace (btp->target);
758 btp->target = NULL;
759
760 btrace_clear (tp);
761 }
762
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   BTINFO is the old branch trace until the last stop.
   May modify BTRACE as well as the existing trace in BTINFO.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (VEC (btrace_block_s) **btrace,
		     const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  /* If we don't have trace, there's nothing to do.  */
  if (VEC_empty (btrace_block_s, *btrace))
    return 0;

  /* The caller only asks for stitching when old trace exists.  */
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, *btrace);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, *btrace) == 1)
    {
      VEC_pop (btrace_block_s, *btrace);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
836
837 /* Clear the branch trace histories in BTINFO. */
838
839 static void
840 btrace_clear_history (struct btrace_thread_info *btinfo)
841 {
842 xfree (btinfo->insn_history);
843 xfree (btinfo->call_history);
844 xfree (btinfo->replay);
845
846 btinfo->insn_history = NULL;
847 btinfo->call_history = NULL;
848 btinfo->replay = NULL;
849 }
850
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  VEC (btrace_block_s) *btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btrace = NULL;
  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  /* Tracing is not enabled for this thread; there is nothing to fetch.  */
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!VEC_empty (btrace_block_s, btrace))
    {
      /* The iterator histories refer to the old trace; drop them before
	 appending the new blocks.  */
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, btrace);
    }

  do_cleanups (cleanup);
}
920
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  /* Free all function segments, following the chronological flow chain.  */
  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;

  btrace_clear_history (btinfo);
}
951
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Recorded traces hold symbol pointers into OBJFILE; clear all threads'
     traces unconditionally rather than tracking which thread references
     which objfile.  */
  ALL_THREADS (tp)
    btrace_clear (tp);
}
964
965 #if defined (HAVE_LIBEXPAT)
966
/* Check the btrace document version.
   gdb_xml start-element handler for the top-level "btrace" element;
   rejects any document whose "version" attribute is not "1.0".  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  /* "version" is a declared attribute of the element, so the lookup is
     expected to succeed.  */
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
979
/* Parse a btrace "block" xml record.
   gdb_xml start-element handler; USER_DATA is the block vector being
   built by parse_xml_btrace.  Appends one block with the element's
   "begin" and "end" addresses.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  VEC (btrace_block_s) **btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;
  block = VEC_safe_push (btrace_block_s, *btrace, NULL);

  /* Both attributes are required and parsed as ULONGEST by the
     block_attributes table.  */
  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block->begin = *begin;
  block->end = *end;
}
1000
/* Attributes of a btrace "block" element: the required begin and end
   addresses, parsed as unsigned integers.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element: the document version.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": zero or more "block" elements, each handled
   by parse_xml_btrace_block.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The document's top-level element, with its version check.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1023
1024 #endif /* defined (HAVE_LIBEXPAT) */
1025
/* See btrace.h.  */

VEC (btrace_block_s) *
parse_xml_btrace (const char *buffer)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  /* Free the partially built vector if parsing errors out.  */
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, &btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  Ownership transfers to the caller.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */

  return btrace;
}
1054
1055 /* See btrace.h. */
1056
1057 const struct btrace_insn *
1058 btrace_insn_get (const struct btrace_insn_iterator *it)
1059 {
1060 const struct btrace_function *bfun;
1061 unsigned int index, end;
1062
1063 index = it->index;
1064 bfun = it->function;
1065
1066 /* The index is within the bounds of this function's instruction vector. */
1067 end = VEC_length (btrace_insn_s, bfun->insn);
1068 gdb_assert (0 < end);
1069 gdb_assert (index < end);
1070
1071 return VEC_index (btrace_insn_s, bfun->insn, index);
1072 }
1073
1074 /* See btrace.h. */
1075
1076 unsigned int
1077 btrace_insn_number (const struct btrace_insn_iterator *it)
1078 {
1079 const struct btrace_function *bfun;
1080
1081 bfun = it->function;
1082 return bfun->insn_offset + it->index;
1083 }
1084
1085 /* See btrace.h. */
1086
1087 void
1088 btrace_insn_begin (struct btrace_insn_iterator *it,
1089 const struct btrace_thread_info *btinfo)
1090 {
1091 const struct btrace_function *bfun;
1092
1093 bfun = btinfo->begin;
1094 if (bfun == NULL)
1095 error (_("No trace."));
1096
1097 it->function = bfun;
1098 it->index = 0;
1099 }
1100
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  /* The last instruction in the last function is the current instruction.
     We point to it - it is one past the end of the execution trace.
     NOTE(review): this assumes the last segment is non-empty (the
     invariant maintained by btrace_compute_ftrace); with LENGTH == 0 the
     index would wrap.  */
  length = VEC_length (btrace_insn_s, bfun->insn);

  it->function = bfun;
  it->index = length - 1;
}
1121
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Advance segment by segment until STRIDE instructions have been
     consumed or we run out of trace.  */
  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* The iterator must point inside a non-empty segment.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped, which may be
     less than STRIDE at the end of the trace.  */
  return steps;
}
1184
1185 /* See btrace.h. */
1186
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Walk backwards segment by segment until STRIDE instructions have
     been consumed or we reach the beginning of the trace.  */
  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.
	     The ADV computation below will immediately step back into it.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped back; this is
     less than STRIDE if we hit the beginning of the trace.  */
  return steps;
}
1234
1235 /* See btrace.h. */
1236
int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* Compare the instruction numbers directly instead of casting their
     unsigned difference to int: for iterators more than INT_MAX
     instructions apart, (int) (lnum - rnum) could wrap around and
     yield the wrong sign.  Callers only rely on the sign.  */
  if (lnum < rnum)
    return -1;
  if (lnum > rnum)
    return 1;

  return 0;
}
1248
1249 /* See btrace.h. */
1250
1251 int
1252 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1253 const struct btrace_thread_info *btinfo,
1254 unsigned int number)
1255 {
1256 const struct btrace_function *bfun;
1257 unsigned int end;
1258
1259 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1260 if (bfun->insn_offset <= number)
1261 break;
1262
1263 if (bfun == NULL)
1264 return 0;
1265
1266 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1267 if (end <= number)
1268 return 0;
1269
1270 it->function = bfun;
1271 it->index = number - bfun->insn_offset;
1272
1273 return 1;
1274 }
1275
1276 /* See btrace.h. */
1277
1278 const struct btrace_function *
1279 btrace_call_get (const struct btrace_call_iterator *it)
1280 {
1281 return it->function;
1282 }
1283
1284 /* See btrace.h. */
1285
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.
     NOTE(review): this assumes BTINFO->END is non-NULL here; presumably
     call iterators are only created for non-empty traces (see
     btrace_call_begin/btrace_call_end) -- confirm.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1312
1313 /* See btrace.h. */
1314
1315 void
1316 btrace_call_begin (struct btrace_call_iterator *it,
1317 const struct btrace_thread_info *btinfo)
1318 {
1319 const struct btrace_function *bfun;
1320
1321 bfun = btinfo->begin;
1322 if (bfun == NULL)
1323 error (_("No trace."));
1324
1325 it->btinfo = btinfo;
1326 it->function = bfun;
1327 }
1328
1329 /* See btrace.h. */
1330
1331 void
1332 btrace_call_end (struct btrace_call_iterator *it,
1333 const struct btrace_thread_info *btinfo)
1334 {
1335 const struct btrace_function *bfun;
1336
1337 bfun = btinfo->end;
1338 if (bfun == NULL)
1339 error (_("No trace."));
1340
1341 it->btinfo = btinfo;
1342 it->function = NULL;
1343 }
1344
1345 /* See btrace.h. */
1346
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  Decrementing STEPS here is
	     cancelled by the STEPS += 1 below, so moving onto this
	     segment costs no step.
	     NOTE(review): if STEPS is 0 here, the unsigned decrement
	     wraps to UINT_MAX; the subsequent increment wraps it back
	     to 0 before the function returns -- confirm this transient
	     wrap is intended.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      /* Stop once we have taken the requested number of steps.  */
      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  /* BFUN is NULL if we stepped onto the end iterator.  */
  it->function = bfun;
  return steps;
}
1380
1381 /* See btrace.h. */
1382
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* BFUN == NULL means IT is the end iterator; step back onto the last
     real function segment first.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  Mirrors the skip in
	 btrace_call_next and btrace_call_number.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      /* Leaving the end iterator counts as one step.  */
      steps += 1;
    }

  /* Walk backwards through the function-call chain for the remaining
     steps, stopping early at the beginning of the trace.  */
  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1429
1430 /* See btrace.h. */
1431
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  /* Compare the call numbers directly instead of casting their unsigned
     difference to int: for iterators more than INT_MAX calls apart,
     (int) (lnum - rnum) could wrap around and yield the wrong sign.
     Callers only rely on the sign.  */
  if (lnum < rnum)
    return -1;
  if (lnum > rnum)
    return 1;

  return 0;
}
1443
1444 /* See btrace.h. */
1445
1446 int
1447 btrace_find_call_by_number (struct btrace_call_iterator *it,
1448 const struct btrace_thread_info *btinfo,
1449 unsigned int number)
1450 {
1451 const struct btrace_function *bfun;
1452
1453 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1454 {
1455 unsigned int bnum;
1456
1457 bnum = bfun->number;
1458 if (number == bnum)
1459 {
1460 it->btinfo = btinfo;
1461 it->function = bfun;
1462 return 1;
1463 }
1464
1465 /* Functions are ordered and numbered consecutively. We could bail out
1466 earlier. On the other hand, it is very unlikely that we search for
1467 a nonexistent function. */
1468 }
1469
1470 return 0;
1471 }
1472
1473 /* See btrace.h. */
1474
void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  /* Record the [BEGIN, END) instruction range by value, so the caller's
     iterators need not outlive this call.  */
  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1486
1487 /* See btrace.h. */
1488
void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must refer to the same branch trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  /* Record the [BEGIN, END) call range by value, so the caller's
     iterators need not outlive this call.  */
  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
1502
1503 /* See btrace.h. */
1504
1505 int
1506 btrace_is_replaying (struct thread_info *tp)
1507 {
1508 return tp->btrace.replay != NULL;
1509 }
1510
1511 /* See btrace.h. */
1512
1513 int
1514 btrace_is_empty (struct thread_info *tp)
1515 {
1516 struct btrace_insn_iterator begin, end;
1517 struct btrace_thread_info *btinfo;
1518
1519 btinfo = &tp->btrace;
1520
1521 if (btinfo->begin == NULL)
1522 return 1;
1523
1524 btrace_insn_begin (&begin, btinfo);
1525 btrace_insn_end (&end, btinfo);
1526
1527 return btrace_insn_cmp (&begin, &end) == 0;
1528 }
This page took 0.061215 seconds and 5 git commands to generate.