btrace: change branch trace data structure
[deliverable/binutils-gdb.git] / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "btrace.h"
23 #include "gdbthread.h"
24 #include "exceptions.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33
/* Print a record debug message.  Wrapped in do ... while (0) so the macro
   expands to exactly one statement and nests safely in unbraced if/else.
   Uses the GNU ", ##args" extension so MSG may be used without arguments.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

/* Print a DEBUG message, additionally tagged with the "[ftrace]" prefix
   used by the function-trace computation helpers below.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
47
48 /* Return the function name of a recorded function segment for printing.
49 This function never returns NULL. */
50
51 static const char *
52 ftrace_print_function_name (const struct btrace_function *bfun)
53 {
54 struct minimal_symbol *msym;
55 struct symbol *sym;
56
57 msym = bfun->msym;
58 sym = bfun->sym;
59
60 if (sym != NULL)
61 return SYMBOL_PRINT_NAME (sym);
62
63 if (msym != NULL)
64 return SYMBOL_PRINT_NAME (msym);
65
66 return "<unknown>";
67 }
68
69 /* Return the file name of a recorded function segment for printing.
70 This function never returns NULL. */
71
72 static const char *
73 ftrace_print_filename (const struct btrace_function *bfun)
74 {
75 struct symbol *sym;
76 const char *filename;
77
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 filename = symtab_to_filename_for_display (sym->symtab);
82 else
83 filename = "<unknown>";
84
85 return filename;
86 }
87
88 /* Return a string representation of the address of an instruction.
89 This function never returns NULL. */
90
91 static const char *
92 ftrace_print_insn_addr (const struct btrace_insn *insn)
93 {
94 if (insn == NULL)
95 return "<nil>";
96
97 return core_addr_to_string_nz (insn->pc);
98 }
99
100 /* Print an ftrace debug status message. */
101
102 static void
103 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
104 {
105 const char *fun, *file;
106 unsigned int ibegin, iend;
107 int lbegin, lend, level;
108
109 fun = ftrace_print_function_name (bfun);
110 file = ftrace_print_filename (bfun);
111 level = bfun->level;
112
113 lbegin = bfun->lbegin;
114 lend = bfun->lend;
115
116 ibegin = bfun->insn_offset;
117 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
118
119 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
121 ibegin, iend);
122 }
123
124 /* Return non-zero if BFUN does not match MFUN and FUN,
125 return zero otherwise. */
126
127 static int
128 ftrace_function_switched (const struct btrace_function *bfun,
129 const struct minimal_symbol *mfun,
130 const struct symbol *fun)
131 {
132 struct minimal_symbol *msym;
133 struct symbol *sym;
134
135 msym = bfun->msym;
136 sym = bfun->sym;
137
138 /* If the minimal symbol changed, we certainly switched functions. */
139 if (mfun != NULL && msym != NULL
140 && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
141 return 1;
142
143 /* If the symbol changed, we certainly switched functions. */
144 if (fun != NULL && sym != NULL)
145 {
146 const char *bfname, *fname;
147
148 /* Check the function name. */
149 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
150 return 1;
151
152 /* Check the location of those functions, as well. */
153 bfname = symtab_to_fullname (sym->symtab);
154 fname = symtab_to_fullname (fun->symtab);
155 if (filename_cmp (fname, bfname) != 0)
156 return 1;
157 }
158
159 /* If we lost symbol information, we switched functions. */
160 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
161 return 1;
162
163 /* If we gained symbol information, we switched functions. */
164 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
165 return 1;
166
167 return 0;
168 }
169
170 /* Return non-zero if we should skip this file when generating the function
171 call history, zero otherwise.
172 We would want to do that if, say, a macro that is defined in another file
173 is expanded in this function. */
174
175 static int
176 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
177 {
178 struct symbol *sym;
179 const char *bfile;
180
181 sym = bfun->sym;
182 if (sym == NULL)
183 return 1;
184
185 bfile = symtab_to_fullname (sym->symtab);
186
187 return (filename_cmp (bfile, fullname) != 0);
188 }
189
190 /* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function. */
193
194 static struct btrace_function *
195 ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
197 struct symbol *fun)
198 {
199 struct btrace_function *bfun;
200
201 bfun = xzalloc (sizeof (*bfun));
202
203 bfun->msym = mfun;
204 bfun->sym = fun;
205 bfun->flow.prev = prev;
206
207 /* We start with the identities of min and max, respectively. */
208 bfun->lbegin = INT_MAX;
209 bfun->lend = INT_MIN;
210
211 if (prev != NULL)
212 {
213 gdb_assert (prev->flow.next == NULL);
214 prev->flow.next = bfun;
215
216 bfun->number = prev->number + 1;
217 bfun->insn_offset = (prev->insn_offset
218 + VEC_length (btrace_insn_s, prev->insn));
219 }
220
221 return bfun;
222 }
223
224 /* Update the UP field of a function segment. */
225
226 static void
227 ftrace_update_caller (struct btrace_function *bfun,
228 struct btrace_function *caller,
229 enum btrace_function_flag flags)
230 {
231 if (bfun->up != NULL)
232 ftrace_debug (bfun, "updating caller");
233
234 bfun->up = caller;
235 bfun->flags = flags;
236
237 ftrace_debug (bfun, "set caller");
238 }
239
240 /* Fix up the caller for all segments of a function. */
241
242 static void
243 ftrace_fixup_caller (struct btrace_function *bfun,
244 struct btrace_function *caller,
245 enum btrace_function_flag flags)
246 {
247 struct btrace_function *prev, *next;
248
249 ftrace_update_caller (bfun, caller, flags);
250
251 /* Update all function segments belonging to the same function. */
252 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
253 ftrace_update_caller (prev, caller, flags);
254
255 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
256 ftrace_update_caller (next, caller, flags);
257 }
258
259 /* Add a new function segment for a call.
260 CALLER is the chronologically preceding function segment.
261 MFUN and FUN are the symbol information we have for this function. */
262
263 static struct btrace_function *
264 ftrace_new_call (struct btrace_function *caller,
265 struct minimal_symbol *mfun,
266 struct symbol *fun)
267 {
268 struct btrace_function *bfun;
269
270 bfun = ftrace_new_function (caller, mfun, fun);
271 bfun->up = caller;
272 bfun->level = caller->level + 1;
273
274 ftrace_debug (bfun, "new call");
275
276 return bfun;
277 }
278
279 /* Add a new function segment for a tail call.
280 CALLER is the chronologically preceding function segment.
281 MFUN and FUN are the symbol information we have for this function. */
282
283 static struct btrace_function *
284 ftrace_new_tailcall (struct btrace_function *caller,
285 struct minimal_symbol *mfun,
286 struct symbol *fun)
287 {
288 struct btrace_function *bfun;
289
290 bfun = ftrace_new_function (caller, mfun, fun);
291 bfun->up = caller;
292 bfun->level = caller->level + 1;
293 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
294
295 ftrace_debug (bfun, "new tail call");
296
297 return bfun;
298 }
299
300 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
301 symbol information. */
302
303 static struct btrace_function *
304 ftrace_find_caller (struct btrace_function *bfun,
305 struct minimal_symbol *mfun,
306 struct symbol *fun)
307 {
308 for (; bfun != NULL; bfun = bfun->up)
309 {
310 /* Skip functions with incompatible symbol information. */
311 if (ftrace_function_switched (bfun, mfun, fun))
312 continue;
313
314 /* This is the function segment we're looking for. */
315 break;
316 }
317
318 return bfun;
319 }
320
321 /* Find the innermost caller in the back trace of BFUN, skipping all
322 function segments that do not end with a call instruction (e.g.
323 tail calls ending with a jump). */
324
325 static struct btrace_function *
326 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
327 {
328 for (; bfun != NULL; bfun = bfun->up)
329 {
330 struct btrace_insn *last;
331 CORE_ADDR pc;
332
333 /* We do not allow empty function segments. */
334 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
335
336 last = VEC_last (btrace_insn_s, bfun->insn);
337 pc = last->pc;
338
339 if (gdbarch_insn_is_call (gdbarch, pc))
340 break;
341 }
342
343 return bfun;
344 }
345
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      /* Link the two segments of the same function instance together.  */
      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.
	     NOTE(review): min (0, prev->level) - 1 makes each such return
	     one level more negative than the topmost known segment; the
	     global level offset in btrace_compute_ftrace renormalizes.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV: every segment of PREV's
	     function now links up to the new return segment.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
421
422 /* Add a new function segment for a function switch.
423 PREV is the chronologically preceding function segment.
424 MFUN and FUN are the symbol information we have for this function. */
425
426 static struct btrace_function *
427 ftrace_new_switch (struct btrace_function *prev,
428 struct minimal_symbol *mfun,
429 struct symbol *fun)
430 {
431 struct btrace_function *bfun;
432
433 /* This is an unexplained function switch. The call stack will likely
434 be wrong at this point. */
435 bfun = ftrace_new_function (prev, mfun, fun);
436
437 /* We keep the function level. */
438 bfun->level = prev->level;
439
440 ftrace_debug (bfun, "new switch");
441
442 return bfun;
443 }
444
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      /* LAST may be NULL here; ftrace_print_insn_addr handles that.  */
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Still in the same function; no new segment needed.  */
  return bfun;
}
533
534 /* Update BFUN's source range with respect to the instruction at PC. */
535
536 static void
537 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
538 {
539 struct symtab_and_line sal;
540 const char *fullname;
541
542 sal = find_pc_line (pc, 0);
543 if (sal.symtab == NULL || sal.line == 0)
544 {
545 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
546 return;
547 }
548
549 /* Check if we switched files. This could happen if, say, a macro that
550 is defined in another file is expanded here. */
551 fullname = symtab_to_fullname (sal.symtab);
552 if (ftrace_skip_file (bfun, fullname))
553 {
554 DEBUG_FTRACE ("ignoring file at %s, file=%s",
555 core_addr_to_string_nz (pc), fullname);
556 return;
557 }
558
559 /* Update the line range. */
560 bfun->lbegin = min (bfun->lbegin, sal.line);
561 bfun->lend = max (bfun->lend, sal.line);
562
563 if (record_debug > 1)
564 ftrace_debug (bfun, "update lines");
565 }
566
567 /* Add the instruction at PC to BFUN's instructions. */
568
569 static void
570 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
571 {
572 struct btrace_insn *insn;
573
574 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
575 insn->pc = pc;
576
577 if (record_debug > 1)
578 ftrace_debug (bfun, "update insn");
579 }
580
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
		       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = NULL;
  end = NULL;
  /* LEVEL tracks the minimal function level seen; start at the identity
     of min.  */
  level = INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  /* NOTE(review): blocks are iterated back to front, which appears to put
     them into chronological order — confirm against target_read_btrace.  */
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      /* Decode the block one instruction at a time.  */
      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.  */
	  level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
659
660 /* See btrace.h. */
661
662 void
663 btrace_enable (struct thread_info *tp)
664 {
665 if (tp->btrace.target != NULL)
666 return;
667
668 if (!target_supports_btrace ())
669 error (_("Target does not support branch tracing."));
670
671 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
672
673 tp->btrace.target = target_enable_btrace (tp->ptid);
674 }
675
676 /* See btrace.h. */
677
678 void
679 btrace_disable (struct thread_info *tp)
680 {
681 struct btrace_thread_info *btp = &tp->btrace;
682 int errcode = 0;
683
684 if (btp->target == NULL)
685 return;
686
687 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
688
689 target_disable_btrace (btp->target);
690 btp->target = NULL;
691
692 btrace_clear (tp);
693 }
694
695 /* See btrace.h. */
696
697 void
698 btrace_teardown (struct thread_info *tp)
699 {
700 struct btrace_thread_info *btp = &tp->btrace;
701 int errcode = 0;
702
703 if (btp->target == NULL)
704 return;
705
706 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
707
708 target_teardown_btrace (btp->target);
709 btp->target = NULL;
710
711 btrace_clear (tp);
712 }
713
714 /* See btrace.h. */
715
716 void
717 btrace_fetch (struct thread_info *tp)
718 {
719 struct btrace_thread_info *btinfo;
720 VEC (btrace_block_s) *btrace;
721 struct cleanup *cleanup;
722
723 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
724
725 btinfo = &tp->btrace;
726 if (btinfo->target == NULL)
727 return;
728
729 btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
730 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
731
732 if (!VEC_empty (btrace_block_s, btrace))
733 {
734 btrace_clear (tp);
735 btrace_compute_ftrace (btinfo, btrace);
736 }
737
738 do_cleanups (cleanup);
739 }
740
741 /* See btrace.h. */
742
743 void
744 btrace_clear (struct thread_info *tp)
745 {
746 struct btrace_thread_info *btinfo;
747 struct btrace_function *it, *trash;
748
749 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
750
751 btinfo = &tp->btrace;
752
753 it = btinfo->begin;
754 while (it != NULL)
755 {
756 trash = it;
757 it = it->flow.next;
758
759 xfree (trash);
760 }
761
762 btinfo->begin = NULL;
763 btinfo->end = NULL;
764
765 xfree (btinfo->insn_history);
766 xfree (btinfo->call_history);
767
768 btinfo->insn_history = NULL;
769 btinfo->call_history = NULL;
770 }
771
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* The trace references symbols that may come from OBJFILE; drop the
     trace data of every thread.  */
  ALL_THREADS (tp)
    btrace_clear (tp);
}
784
785 #if defined (HAVE_LIBEXPAT)
786
787 /* Check the btrace document version. */
788
789 static void
790 check_xml_btrace_version (struct gdb_xml_parser *parser,
791 const struct gdb_xml_element *element,
792 void *user_data, VEC (gdb_xml_value_s) *attributes)
793 {
794 const char *version = xml_find_attribute (attributes, "version")->value;
795
796 if (strcmp (version, "1.0") != 0)
797 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
798 }
799
800 /* Parse a btrace "block" xml record. */
801
802 static void
803 parse_xml_btrace_block (struct gdb_xml_parser *parser,
804 const struct gdb_xml_element *element,
805 void *user_data, VEC (gdb_xml_value_s) *attributes)
806 {
807 VEC (btrace_block_s) **btrace;
808 struct btrace_block *block;
809 ULONGEST *begin, *end;
810
811 btrace = user_data;
812 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
813
814 begin = xml_find_attribute (attributes, "begin")->value;
815 end = xml_find_attribute (attributes, "end")->value;
816
817 block->begin = *begin;
818 block->end = *end;
819 }
820
/* Attributes of a btrace "block" element: both are required and parsed
   as ULONGEST.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": zero or more "block" records.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The top-level "btrace" element; its start handler checks the version.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
843
844 #endif /* defined (HAVE_LIBEXPAT) */
845
846 /* See btrace.h. */
847
848 VEC (btrace_block_s) *
849 parse_xml_btrace (const char *buffer)
850 {
851 VEC (btrace_block_s) *btrace = NULL;
852 struct cleanup *cleanup;
853 int errcode;
854
855 #if defined (HAVE_LIBEXPAT)
856
857 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
858 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
859 buffer, &btrace);
860 if (errcode != 0)
861 {
862 do_cleanups (cleanup);
863 return NULL;
864 }
865
866 /* Keep parse results. */
867 discard_cleanups (cleanup);
868
869 #else /* !defined (HAVE_LIBEXPAT) */
870
871 error (_("Cannot process branch trace. XML parsing is not supported."));
872
873 #endif /* !defined (HAVE_LIBEXPAT) */
874
875 return btrace;
876 }
877
878 /* See btrace.h. */
879
880 const struct btrace_insn *
881 btrace_insn_get (const struct btrace_insn_iterator *it)
882 {
883 const struct btrace_function *bfun;
884 unsigned int index, end;
885
886 index = it->index;
887 bfun = it->function;
888
889 /* The index is within the bounds of this function's instruction vector. */
890 end = VEC_length (btrace_insn_s, bfun->insn);
891 gdb_assert (0 < end);
892 gdb_assert (index < end);
893
894 return VEC_index (btrace_insn_s, bfun->insn, index);
895 }
896
897 /* See btrace.h. */
898
899 unsigned int
900 btrace_insn_number (const struct btrace_insn_iterator *it)
901 {
902 const struct btrace_function *bfun;
903
904 bfun = it->function;
905 return bfun->insn_offset + it->index;
906 }
907
908 /* See btrace.h. */
909
910 void
911 btrace_insn_begin (struct btrace_insn_iterator *it,
912 const struct btrace_thread_info *btinfo)
913 {
914 const struct btrace_function *bfun;
915
916 bfun = btinfo->begin;
917 if (bfun == NULL)
918 error (_("No trace."));
919
920 it->function = bfun;
921 it->index = 0;
922 }
923
924 /* See btrace.h. */
925
926 void
927 btrace_insn_end (struct btrace_insn_iterator *it,
928 const struct btrace_thread_info *btinfo)
929 {
930 const struct btrace_function *bfun;
931 unsigned int length;
932
933 bfun = btinfo->end;
934 if (bfun == NULL)
935 error (_("No trace."));
936
937 /* The last instruction in the last function is the current instruction.
938 We point to it - it is one past the end of the execution trace. */
939 length = VEC_length (btrace_insn_s, bfun->insn);
940
941 it->function = bfun;
942 it->index = length - 1;
943 }
944
/* See btrace.h.
   Advance IT by up to STRIDE instructions; return the number of
   instructions actually stepped, which is smaller than STRIDE when the
   end of the trace is reached.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* Segments are never empty and the index stays in bounds.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1007
/* See btrace.h.
   Step IT backwards by up to STRIDE instructions; return the number of
   instructions actually stepped, which is smaller than STRIDE when the
   beginning of the trace is reached.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1057
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  /* Compare global instruction numbers; the difference yields the usual
     negative/zero/positive ordering.  */
  return (int) (btrace_insn_number (lhs) - btrace_insn_number (rhs));
}
1071
1072 /* See btrace.h. */
1073
1074 int
1075 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1076 const struct btrace_thread_info *btinfo,
1077 unsigned int number)
1078 {
1079 const struct btrace_function *bfun;
1080 unsigned int end;
1081
1082 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1083 if (bfun->insn_offset <= number)
1084 break;
1085
1086 if (bfun == NULL)
1087 return 0;
1088
1089 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1090 if (end <= number)
1091 return 0;
1092
1093 it->function = bfun;
1094 it->index = number - bfun->insn_offset;
1095
1096 return 1;
1097 }
1098
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* For the end iterator, IT->function is NULL.  */
  return it->function;
}
1106
1107 /* See btrace.h. */
1108
1109 unsigned int
1110 btrace_call_number (const struct btrace_call_iterator *it)
1111 {
1112 const struct btrace_thread_info *btinfo;
1113 const struct btrace_function *bfun;
1114 unsigned int insns;
1115
1116 btinfo = it->btinfo;
1117 bfun = it->function;
1118 if (bfun != NULL)
1119 return bfun->number;
1120
1121 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1122 number of the last function. */
1123 bfun = btinfo->end;
1124 insns = VEC_length (btrace_insn_s, bfun->insn);
1125
1126 /* If the function contains only a single instruction (i.e. the current
1127 instruction), it will be skipped and its number is already the number
1128 we seek. */
1129 if (insns == 1)
1130 return bfun->number;
1131
1132 /* Otherwise, return one more than the number of the last function. */
1133 return bfun->number + 1;
1134 }
1135
1136 /* See btrace.h. */
1137
1138 void
1139 btrace_call_begin (struct btrace_call_iterator *it,
1140 const struct btrace_thread_info *btinfo)
1141 {
1142 const struct btrace_function *bfun;
1143
1144 bfun = btinfo->begin;
1145 if (bfun == NULL)
1146 error (_("No trace."));
1147
1148 it->btinfo = btinfo;
1149 it->function = bfun;
1150 }
1151
1152 /* See btrace.h. */
1153
1154 void
1155 btrace_call_end (struct btrace_call_iterator *it,
1156 const struct btrace_thread_info *btinfo)
1157 {
1158 const struct btrace_function *bfun;
1159
1160 bfun = btinfo->end;
1161 if (bfun == NULL)
1162 error (_("No trace."));
1163
1164 it->btinfo = btinfo;
1165 it->function = NULL;
1166 }
1167
/* See btrace.h.
   Advance IT by up to STRIDE function segments; return the number of
   segments actually stepped.  Stepping past the last segment leaves IT
   at the end position (function == NULL).  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  The decrement compensates
	     for the increment below so the skipped segment does not
	     count as a step.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1203
/* See btrace.h.
   Step IT backwards by up to STRIDE function segments; return the number
   of segments actually stepped, which is smaller than STRIDE when the
   beginning of the trace is reached.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      /* Starting at the end iterator: step back onto the last segment.  */
      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  /* Walk the chronological flow chain backwards.  */
  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1252
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Compare function segment numbers; the difference yields the usual
     negative/zero/positive ordering.  */
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
1266
1267 /* See btrace.h. */
1268
1269 int
1270 btrace_find_call_by_number (struct btrace_call_iterator *it,
1271 const struct btrace_thread_info *btinfo,
1272 unsigned int number)
1273 {
1274 const struct btrace_function *bfun;
1275
1276 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1277 {
1278 unsigned int bnum;
1279
1280 bnum = bfun->number;
1281 if (number == bnum)
1282 {
1283 it->btinfo = btinfo;
1284 it->function = bfun;
1285 return 1;
1286 }
1287
1288 /* Functions are ordered and numbered consecutively. We could bail out
1289 earlier. On the other hand, it is very unlikely that we search for
1290 a nonexistent function. */
1291 }
1292
1293 return 0;
1294 }
1295
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  /* Remember the [BEGIN; END) iterator range.  */
  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1309
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must belong to the same thread's trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  /* Remember the [BEGIN; END) iterator range.  */
  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
This page took 0.056416 seconds and 5 git commands to generate.