1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35
36 #include <inttypes.h>
37
38 static void btrace_add_pc (struct thread_info *tp);
39
40 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
41 when used in if statements. */
42
43 #define DEBUG(msg, args...) \
44 do \
45 { \
46 if (record_debug != 0) \
47 fprintf_unfiltered (gdb_stdlog, \
48 "[btrace] " msg "\n", ##args); \
49 } \
50 while (0)
51
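/* For example (hypothetical caller): if the body were a bare "if"
   statement instead of do ... while (0), the "else" below would bind
   to the macro's internal "if (record_debug != 0)" instead of to the
   caller's "if", silently changing the control flow:

     if (processing)
       DEBUG ("processing %s", name);
     else
       do_cleanup ();

   The do ... while (0) wrapper turns the expansion into a single
   statement that composes correctly with if/else. */
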
52 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
53
54 /* Return the function name of a recorded function segment for printing.
55 This function never returns NULL. */
56
57 static const char *
58 ftrace_print_function_name (const struct btrace_function *bfun)
59 {
60 struct minimal_symbol *msym;
61 struct symbol *sym;
62
63 msym = bfun->msym;
64 sym = bfun->sym;
65
66 if (sym != NULL)
67 return SYMBOL_PRINT_NAME (sym);
68
69 if (msym != NULL)
70 return MSYMBOL_PRINT_NAME (msym);
71
72 return "<unknown>";
73 }
74
75 /* Return the file name of a recorded function segment for printing.
76 This function never returns NULL. */
77
78 static const char *
79 ftrace_print_filename (const struct btrace_function *bfun)
80 {
81 struct symbol *sym;
82 const char *filename;
83
84 sym = bfun->sym;
85
86 if (sym != NULL)
87 filename = symtab_to_filename_for_display (symbol_symtab (sym));
88 else
89 filename = "<unknown>";
90
91 return filename;
92 }
93
94 /* Return a string representation of the address of an instruction.
95 This function never returns NULL. */
96
97 static const char *
98 ftrace_print_insn_addr (const struct btrace_insn *insn)
99 {
100 if (insn == NULL)
101 return "<nil>";
102
103 return core_addr_to_string_nz (insn->pc);
104 }
105
106 /* Print an ftrace debug status message. */
107
108 static void
109 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
110 {
111 const char *fun, *file;
112 unsigned int ibegin, iend;
113 int level;
114
115 fun = ftrace_print_function_name (bfun);
116 file = ftrace_print_filename (bfun);
117 level = bfun->level;
118
119 ibegin = bfun->insn_offset;
120 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
121
122 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
123 prefix, fun, file, level, ibegin, iend);
124 }
125
126 /* Return non-zero if BFUN does not match MFUN and FUN;
127 return zero otherwise. */
128
129 static int
130 ftrace_function_switched (const struct btrace_function *bfun,
131 const struct minimal_symbol *mfun,
132 const struct symbol *fun)
133 {
134 struct minimal_symbol *msym;
135 struct symbol *sym;
136
137 msym = bfun->msym;
138 sym = bfun->sym;
139
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun != NULL && msym != NULL
142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
143 return 1;
144
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun != NULL && sym != NULL)
147 {
148 const char *bfname, *fname;
149
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
152 return 1;
153
154 /* Check the location of those functions, as well. */
155 bfname = symtab_to_fullname (symbol_symtab (sym));
156 fname = symtab_to_fullname (symbol_symtab (fun));
157 if (filename_cmp (fname, bfname) != 0)
158 return 1;
159 }
160
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
163 return 1;
164
165 /* If we gained symbol information, we switched functions. */
166 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
167 return 1;
168
169 return 0;
170 }
171
172 /* Allocate and initialize a new branch trace function segment.
173 PREV is the chronologically preceding function segment.
174 MFUN and FUN are the symbol information we have for this function. */
175
176 static struct btrace_function *
177 ftrace_new_function (struct btrace_function *prev,
178 struct minimal_symbol *mfun,
179 struct symbol *fun)
180 {
181 struct btrace_function *bfun;
182
183 bfun = xzalloc (sizeof (*bfun));
184
185 bfun->msym = mfun;
186 bfun->sym = fun;
187 bfun->flow.prev = prev;
188
189 if (prev == NULL)
190 {
191 /* Start counting at one. */
192 bfun->number = 1;
193 bfun->insn_offset = 1;
194 }
195 else
196 {
197 gdb_assert (prev->flow.next == NULL);
198 prev->flow.next = bfun;
199
200 bfun->number = prev->number + 1;
201 bfun->insn_offset = (prev->insn_offset
202 + VEC_length (btrace_insn_s, prev->insn));
203 bfun->level = prev->level;
204 }
205
206 return bfun;
207 }
208
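/* For example: both function numbers and instruction numbers start
   counting at one. If segment 1 contains three instructions
   (numbers 1 to 3), a segment created with it as PREV gets number 2
   and insn_offset 4, keeping instruction numbering consecutive
   across segments. */
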
209 /* Update the UP field of a function segment. */
210
211 static void
212 ftrace_update_caller (struct btrace_function *bfun,
213 struct btrace_function *caller,
214 enum btrace_function_flag flags)
215 {
216 if (bfun->up != NULL)
217 ftrace_debug (bfun, "updating caller");
218
219 bfun->up = caller;
220 bfun->flags = flags;
221
222 ftrace_debug (bfun, "set caller");
223 }
224
225 /* Fix up the caller for all segments of a function. */
226
227 static void
228 ftrace_fixup_caller (struct btrace_function *bfun,
229 struct btrace_function *caller,
230 enum btrace_function_flag flags)
231 {
232 struct btrace_function *prev, *next;
233
234 ftrace_update_caller (bfun, caller, flags);
235
236 /* Update all function segments belonging to the same function. */
237 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
238 ftrace_update_caller (prev, caller, flags);
239
240 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
241 ftrace_update_caller (next, caller, flags);
242 }
243
244 /* Add a new function segment for a call.
245 CALLER is the chronologically preceding function segment.
246 MFUN and FUN are the symbol information we have for this function. */
247
248 static struct btrace_function *
249 ftrace_new_call (struct btrace_function *caller,
250 struct minimal_symbol *mfun,
251 struct symbol *fun)
252 {
253 struct btrace_function *bfun;
254
255 bfun = ftrace_new_function (caller, mfun, fun);
256 bfun->up = caller;
257 bfun->level += 1;
258
259 ftrace_debug (bfun, "new call");
260
261 return bfun;
262 }
263
264 /* Add a new function segment for a tail call.
265 CALLER is the chronologically preceding function segment.
266 MFUN and FUN are the symbol information we have for this function. */
267
268 static struct btrace_function *
269 ftrace_new_tailcall (struct btrace_function *caller,
270 struct minimal_symbol *mfun,
271 struct symbol *fun)
272 {
273 struct btrace_function *bfun;
274
275 bfun = ftrace_new_function (caller, mfun, fun);
276 bfun->up = caller;
277 bfun->level += 1;
278 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
279
280 ftrace_debug (bfun, "new tail call");
281
282 return bfun;
283 }
284
285 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
286 symbol information. */
287
288 static struct btrace_function *
289 ftrace_find_caller (struct btrace_function *bfun,
290 struct minimal_symbol *mfun,
291 struct symbol *fun)
292 {
293 for (; bfun != NULL; bfun = bfun->up)
294 {
295 /* Skip functions with incompatible symbol information. */
296 if (ftrace_function_switched (bfun, mfun, fun))
297 continue;
298
299 /* This is the function segment we're looking for. */
300 break;
301 }
302
303 return bfun;
304 }
305
306 /* Find the innermost caller in the back trace of BFUN, skipping all
307 function segments that do not end with a call instruction (e.g.
308 tail calls ending with a jump). */
309
310 static struct btrace_function *
311 ftrace_find_call (struct btrace_function *bfun)
312 {
313 for (; bfun != NULL; bfun = bfun->up)
314 {
315 struct btrace_insn *last;
316
317 /* Skip gaps. */
318 if (bfun->errcode != 0)
319 continue;
320
321 last = VEC_last (btrace_insn_s, bfun->insn);
322
323 if (last->iclass == BTRACE_INSN_CALL)
324 break;
325 }
326
327 return bfun;
328 }
329
330 /* Add a continuation segment for a function into which we return.
331 PREV is the chronologically preceding function segment.
332 MFUN and FUN are the symbol information we have for this function. */
333
334 static struct btrace_function *
335 ftrace_new_return (struct btrace_function *prev,
336 struct minimal_symbol *mfun,
337 struct symbol *fun)
338 {
339 struct btrace_function *bfun, *caller;
340
341 bfun = ftrace_new_function (prev, mfun, fun);
342
343 /* It is important to start at PREV's caller. Otherwise, we might find
344 PREV itself, if PREV is a recursive function. */
345 caller = ftrace_find_caller (prev->up, mfun, fun);
346 if (caller != NULL)
347 {
348 /* The caller of PREV is the preceding btrace function segment in this
349 function instance. */
350 gdb_assert (caller->segment.next == NULL);
351
352 caller->segment.next = bfun;
353 bfun->segment.prev = caller;
354
355 /* Maintain the function level. */
356 bfun->level = caller->level;
357
358 /* Maintain the call stack. */
359 bfun->up = caller->up;
360 bfun->flags = caller->flags;
361
362 ftrace_debug (bfun, "new return");
363 }
364 else
365 {
366 /* We did not find a caller. This could mean that something went
367 wrong or that the call is simply not included in the trace. */
368
369 /* Let's search for some actual call. */
370 caller = ftrace_find_call (prev->up);
371 if (caller == NULL)
372 {
373 /* There is no call in PREV's back trace. We assume that the
374 branch trace did not include it. */
375
376 /* Let's find the topmost call function - this skips tail calls. */
377 while (prev->up != NULL)
378 prev = prev->up;
379
380 /* We maintain levels for a series of returns for which we have
381 not seen the calls.
382 We start at the preceding function's level in case this has
383 already been a return for which we have not seen the call.
384 We start at level 0 otherwise, to handle tail calls correctly. */
385 bfun->level = min (0, prev->level) - 1;
386
387 /* Fix up the call stack for PREV. */
388 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
389
390 ftrace_debug (bfun, "new return - no caller");
391 }
392 else
393 {
394 /* There is a call in PREV's back trace to which we should have
395 returned. Let's remain at this level. */
396 bfun->level = prev->level;
397
398 ftrace_debug (bfun, "new return - unknown caller");
399 }
400 }
401
402 return bfun;
403 }
404
405 /* Add a new function segment for a function switch.
406 PREV is the chronologically preceding function segment.
407 MFUN and FUN are the symbol information we have for this function. */
408
409 static struct btrace_function *
410 ftrace_new_switch (struct btrace_function *prev,
411 struct minimal_symbol *mfun,
412 struct symbol *fun)
413 {
414 struct btrace_function *bfun;
415
416 /* This is an unexplained function switch. The call stack will likely
417 be wrong at this point. */
418 bfun = ftrace_new_function (prev, mfun, fun);
419
420 ftrace_debug (bfun, "new switch");
421
422 return bfun;
423 }
424
425 /* Add a new function segment for a gap in the trace due to a decode error.
426 PREV is the chronologically preceding function segment.
427 ERRCODE is the format-specific error code. */
428
429 static struct btrace_function *
430 ftrace_new_gap (struct btrace_function *prev, int errcode)
431 {
432 struct btrace_function *bfun;
433
434 /* Reuse PREV if it is empty and not itself a gap. */
435 if (prev != NULL && prev->errcode == 0
436 && VEC_empty (btrace_insn_s, prev->insn))
437 bfun = prev;
438 else
439 bfun = ftrace_new_function (prev, NULL, NULL);
440
441 bfun->errcode = errcode;
442
443 ftrace_debug (bfun, "new gap");
444
445 return bfun;
446 }
447
448 /* Update BFUN with respect to the instruction at PC. This may create new
449 function segments.
450 Return the chronologically latest function segment, never NULL. */
451
452 static struct btrace_function *
453 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
454 {
455 struct bound_minimal_symbol bmfun;
456 struct minimal_symbol *mfun;
457 struct symbol *fun;
458 struct btrace_insn *last;
459
460 /* Try to determine the function we're in. We use both types of symbols
461 to avoid surprises when we sometimes get a full symbol and sometimes
462 only a minimal symbol. */
463 fun = find_pc_function (pc);
464 bmfun = lookup_minimal_symbol_by_pc (pc);
465 mfun = bmfun.minsym;
466
467 if (fun == NULL && mfun == NULL)
468 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
469
470 /* If we didn't have a function or if we had a gap before, we create one. */
471 if (bfun == NULL || bfun->errcode != 0)
472 return ftrace_new_function (bfun, mfun, fun);
473
474 /* Check the last instruction, if we have one.
475 We do this check first, since it allows us to fill in the call stack
476 links in addition to the normal flow links. */
477 last = NULL;
478 if (!VEC_empty (btrace_insn_s, bfun->insn))
479 last = VEC_last (btrace_insn_s, bfun->insn);
480
481 if (last != NULL)
482 {
483 switch (last->iclass)
484 {
485 case BTRACE_INSN_RETURN:
486 {
487 const char *fname;
488
489 /* On some systems, _dl_runtime_resolve returns to the resolved
490 function instead of jumping to it. From our perspective,
491 however, this is a tailcall.
492 If we treated it as return, we wouldn't be able to find the
493 resolved function in our stack back trace. Hence, we would
494 lose the current stack back trace and start anew with an empty
495 back trace. When the resolved function returns, we would then
496 create a stack back trace with the same function names but
497 different frame id's. This will confuse stepping. */
498 fname = ftrace_print_function_name (bfun);
499 if (strcmp (fname, "_dl_runtime_resolve") == 0)
500 return ftrace_new_tailcall (bfun, mfun, fun);
501
502 return ftrace_new_return (bfun, mfun, fun);
503 }
504
505 case BTRACE_INSN_CALL:
506 /* Ignore calls to the next instruction. They are used for PIC. */
507 if (last->pc + last->size == pc)
508 break;
509
510 return ftrace_new_call (bfun, mfun, fun);
511
512 case BTRACE_INSN_JUMP:
513 {
514 CORE_ADDR start;
515
516 start = get_pc_function_start (pc);
517
518 /* If we can't determine the function for PC, we treat a jump at
519 the end of the block as tail call. */
520 if (start == 0 || start == pc)
521 return ftrace_new_tailcall (bfun, mfun, fun);
522 }
523 }
524 }
525
526 /* Check if we're switching functions for some other reason. */
527 if (ftrace_function_switched (bfun, mfun, fun))
528 {
529 DEBUG_FTRACE ("switching from %s in %s at %s",
530 ftrace_print_insn_addr (last),
531 ftrace_print_function_name (bfun),
532 ftrace_print_filename (bfun));
533
534 return ftrace_new_switch (bfun, mfun, fun);
535 }
536
537 return bfun;
538 }
539
540 /* Add the instruction INSN to BFUN's instructions. */
541
542 static void
543 ftrace_update_insns (struct btrace_function *bfun,
544 const struct btrace_insn *insn)
545 {
546 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
547
548 if (record_debug > 1)
549 ftrace_debug (bfun, "update insn");
550 }
551
552 /* Classify the instruction at PC. */
553
554 static enum btrace_insn_class
555 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
556 {
557 enum btrace_insn_class iclass;
558
559 iclass = BTRACE_INSN_OTHER;
560 TRY
561 {
562 if (gdbarch_insn_is_call (gdbarch, pc))
563 iclass = BTRACE_INSN_CALL;
564 else if (gdbarch_insn_is_ret (gdbarch, pc))
565 iclass = BTRACE_INSN_RETURN;
566 else if (gdbarch_insn_is_jump (gdbarch, pc))
567 iclass = BTRACE_INSN_JUMP;
568 }
569 CATCH (error, RETURN_MASK_ERROR)
570 {
571 }
572 END_CATCH
573
574 return iclass;
575 }
576
577 /* Compute the function branch trace from BTS trace. */
578
579 static void
580 btrace_compute_ftrace_bts (struct thread_info *tp,
581 const struct btrace_data_bts *btrace)
582 {
583 struct btrace_thread_info *btinfo;
584 struct btrace_function *begin, *end;
585 struct gdbarch *gdbarch;
586 unsigned int blk, ngaps;
587 int level;
588
589 gdbarch = target_gdbarch ();
590 btinfo = &tp->btrace;
591 begin = btinfo->begin;
592 end = btinfo->end;
593 ngaps = btinfo->ngaps;
594 level = begin != NULL ? -btinfo->level : INT_MAX;
595 blk = VEC_length (btrace_block_s, btrace->blocks);
596
597 while (blk != 0)
598 {
599 btrace_block_s *block;
600 CORE_ADDR pc;
601
602 blk -= 1;
603
604 block = VEC_index (btrace_block_s, btrace->blocks, blk);
605 pc = block->begin;
606
607 for (;;)
608 {
609 struct btrace_insn insn;
610 int size;
611
612 /* We should hit the end of the block. Warn if we went too far. */
613 if (block->end < pc)
614 {
615 /* Indicate the gap in the trace - unless we're at the
616 beginning. */
617 if (begin != NULL)
618 {
619 warning (_("Recorded trace may be corrupted around %s."),
620 core_addr_to_string_nz (pc));
621
622 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
623 ngaps += 1;
624 }
625 break;
626 }
627
628 end = ftrace_update_function (end, pc);
629 if (begin == NULL)
630 begin = end;
631
632 /* Maintain the function level offset.
633 For all but the last block, we do it here. */
634 if (blk != 0)
635 level = min (level, end->level);
636
637 size = 0;
638 TRY
639 {
640 size = gdb_insn_length (gdbarch, pc);
641 }
642 CATCH (error, RETURN_MASK_ERROR)
643 {
644 }
645 END_CATCH
646
647 insn.pc = pc;
648 insn.size = size;
649 insn.iclass = ftrace_classify_insn (gdbarch, pc);
650
651 ftrace_update_insns (end, &insn);
652
653 /* We're done once we pushed the instruction at the end. */
654 if (block->end == pc)
655 break;
656
657 /* We can't continue if we fail to compute the size. */
658 if (size <= 0)
659 {
660 warning (_("Recorded trace may be incomplete around %s."),
661 core_addr_to_string_nz (pc));
662
663 /* Indicate the gap in the trace. We just added INSN so we're
664 not at the beginning. */
665 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
666 ngaps += 1;
667
668 break;
669 }
670
671 pc += size;
672
673 /* Maintain the function level offset.
674 For the last block, we do it here to not consider the last
675 instruction.
676 Since the last instruction corresponds to the current instruction
677 and is not really part of the execution history, it shouldn't
678 affect the level. */
679 if (blk == 0)
680 level = min (level, end->level);
681 }
682 }
683
684 btinfo->begin = begin;
685 btinfo->end = end;
686 btinfo->ngaps = ngaps;
687
688 /* LEVEL is the minimal function level of all btrace function segments.
689 Define the global level offset to -LEVEL so all function levels are
690 normalized to start at zero. */
691 btinfo->level = -level;
692 }
693
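/* To illustrate the decode loop above with a hypothetical trace of two
   blocks: the block vector stores the most recent block first, so for
   the vector { [0x30, 0x38], [0x10, 0x20] } decoding starts at
   pc = 0x10, disassembles forward one instruction at a time until
   pc == 0x20, and then processes [0x30, 0x38] the same way. Both
   block boundaries are inclusive; a block's end is the address of its
   last instruction, not one past it. */
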
694 #if defined (HAVE_LIBIPT)
695
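/* Translate an instruction classification from the libipt enum into the
   corresponding btrace enum. */
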
696 static enum btrace_insn_class
697 pt_reclassify_insn (enum pt_insn_class iclass)
698 {
699 switch (iclass)
700 {
701 case ptic_call:
702 return BTRACE_INSN_CALL;
703
704 case ptic_return:
705 return BTRACE_INSN_RETURN;
706
707 case ptic_jump:
708 return BTRACE_INSN_JUMP;
709
710 default:
711 return BTRACE_INSN_OTHER;
712 }
713 }
714
715 /* Add function branch trace using DECODER. */
716
717 static void
718 ftrace_add_pt (struct pt_insn_decoder *decoder,
719 struct btrace_function **pbegin,
720 struct btrace_function **pend, int *plevel,
721 unsigned int *ngaps)
722 {
723 struct btrace_function *begin, *end, *upd;
724 uint64_t offset;
725 int errcode, nerrors;
726
727 begin = *pbegin;
728 end = *pend;
729 nerrors = 0;
730 for (;;)
731 {
732 struct btrace_insn btinsn;
733 struct pt_insn insn;
734
735 errcode = pt_insn_sync_forward (decoder);
736 if (errcode < 0)
737 {
738 if (errcode != -pte_eos)
739 warning (_("Failed to synchronize onto the Intel(R) Processor "
740 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
741 break;
742 }
743
744 memset (&btinsn, 0, sizeof (btinsn));
745 for (;;)
746 {
747 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
748 if (errcode < 0)
749 break;
750
751 /* Look for gaps in the trace - unless we're at the beginning. */
752 if (begin != NULL)
753 {
754 /* Tracing is disabled and re-enabled each time we enter the
755 kernel. Most times, we continue from the same instruction we
756 stopped before. This is indicated via the RESUMED instruction
757 flag. The ENABLED instruction flag means that we continued
758 from some other instruction. Indicate this as a trace gap. */
759 if (insn.enabled)
760 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
761
762 /* Indicate trace overflows. */
763 if (insn.resynced)
764 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
765 }
766
767 upd = ftrace_update_function (end, insn.ip);
768 if (upd != end)
769 {
770 *pend = end = upd;
771
772 if (begin == NULL)
773 *pbegin = begin = upd;
774 }
775
776 /* Maintain the function level offset. */
777 *plevel = min (*plevel, end->level);
778
779 btinsn.pc = (CORE_ADDR) insn.ip;
780 btinsn.size = (gdb_byte) insn.size;
781 btinsn.iclass = pt_reclassify_insn (insn.iclass);
782
783 ftrace_update_insns (end, &btinsn);
784 }
785
786 if (errcode == -pte_eos)
787 break;
788
789 /* If the gap is at the very beginning, we ignore it - we will have
790 less trace, but we won't have any holes in the trace. */
791 if (begin == NULL)
792 continue;
793
794 pt_insn_get_offset (decoder, &offset);
795
796 warning (_("Failed to decode Intel(R) Processor Trace near trace "
797 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
798 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
799
800 /* Indicate the gap in the trace. */
801 *pend = end = ftrace_new_gap (end, errcode);
802 *ngaps += 1;
nerrors += 1;
803 }
804
805 if (nerrors > 0)
806 warning (_("The recorded execution trace may have gaps."));
807 }
808
809 /* A callback function to allow the trace decoder to read the inferior's
810 memory. */
811
812 static int
813 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
814 const struct pt_asid *asid, CORE_ADDR pc,
815 void *context)
816 {
817 int errcode;
818
819 TRY
820 {
821 errcode = target_read_code (pc, buffer, size);
822 if (errcode != 0)
823 return -pte_nomap;
824 }
825 CATCH (error, RETURN_MASK_ERROR)
826 {
827 return -pte_nomap;
828 }
829 END_CATCH
830
831 return size;
832 }
833
834 /* Translate the vendor from one enum to another. */
835
836 static enum pt_cpu_vendor
837 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
838 {
839 switch (vendor)
840 {
841 default:
842 return pcv_unknown;
843
844 case CV_INTEL:
845 return pcv_intel;
846 }
847 }
848
849 /* Finalize the function branch trace after decode. */
850
851 static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
852 struct thread_info *tp, int level)
853 {
854 pt_insn_free_decoder (decoder);
855
856 /* LEVEL is the minimal function level of all btrace function segments.
857 Define the global level offset to -LEVEL so all function levels are
858 normalized to start at zero. */
859 tp->btrace.level = -level;
860
861 /* Add a single last instruction entry for the current PC.
862 This allows us to compute the backtrace at the current PC using both
863 standard unwind and btrace unwind.
864 This extra entry is ignored by all record commands. */
865 btrace_add_pc (tp);
866 }
867
868 /* Compute the function branch trace from Intel(R) Processor Trace. */
869
870 static void
871 btrace_compute_ftrace_pt (struct thread_info *tp,
872 const struct btrace_data_pt *btrace)
873 {
874 struct btrace_thread_info *btinfo;
875 struct pt_insn_decoder *decoder;
876 struct pt_config config;
877 int level, errcode;
878
879 if (btrace->size == 0)
880 return;
881
882 btinfo = &tp->btrace;
883 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
884
885 pt_config_init (&config);
886 config.begin = btrace->data;
887 config.end = btrace->data + btrace->size;
888
889 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
890 config.cpu.family = btrace->config.cpu.family;
891 config.cpu.model = btrace->config.cpu.model;
892 config.cpu.stepping = btrace->config.cpu.stepping;
893
894 errcode = pt_cpu_errata (&config.errata, &config.cpu);
895 if (errcode < 0)
896 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
897 pt_errstr (pt_errcode (errcode)));
898
899 decoder = pt_insn_alloc_decoder (&config);
900 if (decoder == NULL)
901 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
902
903 TRY
904 {
905 struct pt_image *image;
906
907 image = pt_insn_get_image (decoder);
908 if (image == NULL)
909 error (_("Failed to configure the Intel(R) Processor Trace decoder."));
910
911 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
912 if (errcode < 0)
913 error (_("Failed to configure the Intel(R) Processor Trace decoder: "
914 "%s."), pt_errstr (pt_errcode (errcode)));
915
916 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
917 &btinfo->ngaps);
918 }
919 CATCH (error, RETURN_MASK_ALL)
920 {
921 /* Indicate a gap in the trace if we quit trace processing. */
922 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
923 {
924 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
925 btinfo->ngaps++;
926 }
927
928 btrace_finalize_ftrace_pt (decoder, tp, level);
929
930 throw_exception (error);
931 }
932 END_CATCH
933
934 btrace_finalize_ftrace_pt (decoder, tp, level);
935 }
936
937 #else /* defined (HAVE_LIBIPT) */
938
939 static void
940 btrace_compute_ftrace_pt (struct thread_info *tp,
941 const struct btrace_data_pt *btrace)
942 {
943 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
944 }
945
946 #endif /* defined (HAVE_LIBIPT) */
947
948 /* Compute the function branch trace from the branch trace data BTRACE
949 for thread TP. */
950
951 static void
952 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
953 {
954 DEBUG ("compute ftrace");
955
956 switch (btrace->format)
957 {
958 case BTRACE_FORMAT_NONE:
959 return;
960
961 case BTRACE_FORMAT_BTS:
962 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
963 return;
964
965 case BTRACE_FORMAT_PT:
966 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
967 return;
968 }
969
970 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
971 }
972
973 /* Add an entry for the current PC. */
974
975 static void
976 btrace_add_pc (struct thread_info *tp)
977 {
978 struct btrace_data btrace;
979 struct btrace_block *block;
980 struct regcache *regcache;
981 struct cleanup *cleanup;
982 CORE_ADDR pc;
983
984 regcache = get_thread_regcache (tp->ptid);
985 pc = regcache_read_pc (regcache);
986
987 btrace_data_init (&btrace);
988 btrace.format = BTRACE_FORMAT_BTS;
989 btrace.variant.bts.blocks = NULL;
990
991 cleanup = make_cleanup_btrace_data (&btrace);
992
993 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
994 block->begin = pc;
995 block->end = pc;
996
997 btrace_compute_ftrace (tp, &btrace);
998
999 do_cleanups (cleanup);
1000 }
1001
1002 /* See btrace.h. */
1003
1004 void
1005 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1006 {
1007 if (tp->btrace.target != NULL)
1008 return;
1009
1010 if (!target_supports_btrace (conf->format))
1011 error (_("Target does not support branch tracing."));
1012
1013 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1014
1015 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1016
1017 /* Add an entry for the current PC so we start tracing from where we
1018 enabled it. */
1019 if (tp->btrace.target != NULL)
1020 btrace_add_pc (tp);
1021 }
1022
1023 /* See btrace.h. */
1024
1025 const struct btrace_config *
1026 btrace_conf (const struct btrace_thread_info *btinfo)
1027 {
1028 if (btinfo->target == NULL)
1029 return NULL;
1030
1031 return target_btrace_conf (btinfo->target);
1032 }
1033
1034 /* See btrace.h. */
1035
1036 void
1037 btrace_disable (struct thread_info *tp)
1038 {
1039 struct btrace_thread_info *btp = &tp->btrace;
1040 int errcode = 0;
1041
1042 if (btp->target == NULL)
1043 return;
1044
1045 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1046
1047 target_disable_btrace (btp->target);
1048 btp->target = NULL;
1049
1050 btrace_clear (tp);
1051 }
1052
1053 /* See btrace.h. */
1054
1055 void
1056 btrace_teardown (struct thread_info *tp)
1057 {
1058 struct btrace_thread_info *btp = &tp->btrace;
1059 int errcode = 0;
1060
1061 if (btp->target == NULL)
1062 return;
1063
1064 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1065
1066 target_teardown_btrace (btp->target);
1067 btp->target = NULL;
1068
1069 btrace_clear (tp);
1070 }
1071
1072 /* Stitch branch trace in BTS format. */
1073
1074 static int
1075 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1076 {
1077 struct btrace_thread_info *btinfo;
1078 struct btrace_function *last_bfun;
1079 struct btrace_insn *last_insn;
1080 btrace_block_s *first_new_block;
1081
1082 btinfo = &tp->btrace;
1083 last_bfun = btinfo->end;
1084 gdb_assert (last_bfun != NULL);
1085 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1086
1087 /* If the existing trace ends with a gap, we just glue the traces
1088 together. We need to drop the last (i.e. chronologically first) block
1089 of the new trace, though, since we can't fill in the start address. */
1090 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1091 {
1092 VEC_pop (btrace_block_s, btrace->blocks);
1093 return 0;
1094 }
1095
1096 /* Beware that block trace starts with the most recent block, so the
1097 chronologically first block in the new trace is the last block in
1098 the new trace's block vector. */
1099 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1100 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1101
1102 /* If the current PC at the end of the block is the same as in our current
1103 trace, there are two explanations:
1104 1. we executed the instruction and some branch brought us back.
1105 2. we have not made any progress.
1106 In the first case, the delta trace vector should contain at least two
1107 entries.
1108 In the second case, the delta trace vector should contain exactly one
1109 entry for the partial block containing the current PC. Remove it. */
1110 if (first_new_block->end == last_insn->pc
1111 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1112 {
1113 VEC_pop (btrace_block_s, btrace->blocks);
1114 return 0;
1115 }
1116
1117 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1118 core_addr_to_string_nz (first_new_block->end));
1119
1120 /* Do a simple sanity check to make sure we don't accidentally end up
1121 with a bad block. This should not occur in practice. */
1122 if (first_new_block->end < last_insn->pc)
1123 {
1124 warning (_("Error while trying to read delta trace. Falling back to "
1125 "a full read."));
1126 return -1;
1127 }
1128
1129 /* We adjust the last block to start at the end of our current trace. */
1130 gdb_assert (first_new_block->begin == 0);
1131 first_new_block->begin = last_insn->pc;
1132
1133 /* We simply pop the last insn so we can insert it again as part of
1134 the normal branch trace computation.
1135 Since instruction iterators are based on indices in the instructions
1136 vector, we don't leave any pointers dangling. */
1137 DEBUG ("pruning insn at %s for stitching",
1138 ftrace_print_insn_addr (last_insn));
1139
1140 VEC_pop (btrace_insn_s, last_bfun->insn);
1141
1142 /* The instructions vector may become empty temporarily if this has
1143 been the only instruction in this function segment.
1144 This violates the invariant but will be remedied shortly by
1145 btrace_compute_ftrace when we add the new trace. */
1146
1147 /* The only case where this would hurt is if the entire trace consisted
1148 of just that one instruction. If we remove it, we might turn the now
1149 empty btrace function segment into a gap. But we don't want gaps at
1150 the beginning. To avoid this, we remove the entire old trace. */
1151 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1152 btrace_clear (tp);
1153
1154 return 0;
1155 }
1156
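/* A hypothetical example of the stitching above: the existing trace
   ends with an instruction at pc 0x40 and the delta trace's
   chronologically first block is [0, 0x48]. The block's begin is
   adjusted to 0x40, the instruction at 0x40 is popped from the old
   trace, and the subsequent ftrace computation re-adds it, joining old
   and new trace without a seam and without duplicating the
   instruction. */
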
1157 /* Adjust the block trace in order to stitch old and new trace together.
1158 BTRACE is the new delta trace between the last and the current stop.
1159 TP is the traced thread.
1160 May modify BTRACE as well as the existing trace in TP.
1161 Return 0 on success, -1 otherwise. */
1162
1163 static int
1164 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1165 {
1166 /* If we don't have trace, there's nothing to do. */
1167 if (btrace_data_empty (btrace))
1168 return 0;
1169
1170 switch (btrace->format)
1171 {
1172 case BTRACE_FORMAT_NONE:
1173 return 0;
1174
1175 case BTRACE_FORMAT_BTS:
1176 return btrace_stitch_bts (&btrace->variant.bts, tp);
1177
1178 case BTRACE_FORMAT_PT:
1179 /* Delta reads are not supported. */
1180 return -1;
1181 }
1182
1183 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1184 }
1185
1186 /* Clear the branch trace histories in BTINFO. */
1187
1188 static void
1189 btrace_clear_history (struct btrace_thread_info *btinfo)
1190 {
1191 xfree (btinfo->insn_history);
1192 xfree (btinfo->call_history);
1193 xfree (btinfo->replay);
1194
1195 btinfo->insn_history = NULL;
1196 btinfo->call_history = NULL;
1197 btinfo->replay = NULL;
1198 }
1199
1200 /* See btrace.h. */
1201
1202 void
1203 btrace_fetch (struct thread_info *tp)
1204 {
1205 struct btrace_thread_info *btinfo;
1206 struct btrace_target_info *tinfo;
1207 struct btrace_data btrace;
1208 struct cleanup *cleanup;
1209 int errcode;
1210
1211 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1212
1213 btinfo = &tp->btrace;
1214 tinfo = btinfo->target;
1215 if (tinfo == NULL)
1216 return;
1217
1218 /* There's no way we could get new trace while replaying.
1219 On the other hand, delta trace would return a partial record with the
1220 current PC, which is the replay PC, not the last PC, as expected. */
1221 if (btinfo->replay != NULL)
1222 return;
1223
1224 btrace_data_init (&btrace);
1225 cleanup = make_cleanup_btrace_data (&btrace);
1226
1227 /* Let's first try to extend the trace we already have. */
1228 if (btinfo->end != NULL)
1229 {
1230 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1231 if (errcode == 0)
1232 {
1233 /* Success. Let's try to stitch the traces together. */
1234 errcode = btrace_stitch_trace (&btrace, tp);
1235 }
1236 else
1237 {
1238 /* We failed to read delta trace. Let's try to read new trace. */
1239 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1240
1241 /* If we got any new trace, discard what we have. */
1242 if (errcode == 0 && !btrace_data_empty (&btrace))
1243 btrace_clear (tp);
1244 }
1245
1246 /* If we were not able to read the trace, we start over. */
1247 if (errcode != 0)
1248 {
1249 btrace_clear (tp);
1250 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1251 }
1252 }
1253 else
1254 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1255
1256 /* If we were not able to read the branch trace, signal an error. */
1257 if (errcode != 0)
1258 error (_("Failed to read branch trace."));
1259
1260 /* Compute the trace, provided we have any. */
1261 if (!btrace_data_empty (&btrace))
1262 {
1263 /* Store the raw trace data. The stored data will be cleared in
1264 btrace_clear, so we always append the new trace. */
1265 btrace_data_append (&btinfo->data, &btrace);
1266
1267 btrace_clear_history (btinfo);
1268 btrace_compute_ftrace (tp, &btrace);
1269 }
1270
1271 do_cleanups (cleanup);
1272 }
1273
1274 /* See btrace.h. */
1275
1276 void
1277 btrace_clear (struct thread_info *tp)
1278 {
1279 struct btrace_thread_info *btinfo;
1280 struct btrace_function *it, *trash;
1281
1282 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1283
1284 /* Make sure btrace frames that may hold a pointer into the branch
1285 trace data are destroyed. */
1286 reinit_frame_cache ();
1287
1288 btinfo = &tp->btrace;
1289
1290 it = btinfo->begin;
1291 while (it != NULL)
1292 {
1293 trash = it;
1294 it = it->flow.next;
1295
1296 xfree (trash);
1297 }
1298
1299 btinfo->begin = NULL;
1300 btinfo->end = NULL;
1301 btinfo->ngaps = 0;
1302
1303 btrace_data_clear (&btinfo->data);
1304 btrace_clear_history (btinfo);
1305 }
1306
1307 /* See btrace.h. */
1308
1309 void
1310 btrace_free_objfile (struct objfile *objfile)
1311 {
1312 struct thread_info *tp;
1313
1314 DEBUG ("free objfile");
1315
1316 ALL_NON_EXITED_THREADS (tp)
1317 btrace_clear (tp);
1318 }
1319
1320 #if defined (HAVE_LIBEXPAT)
1321
1322 /* Check the btrace document version. */
1323
1324 static void
1325 check_xml_btrace_version (struct gdb_xml_parser *parser,
1326 const struct gdb_xml_element *element,
1327 void *user_data, VEC (gdb_xml_value_s) *attributes)
1328 {
1329 const char *version = xml_find_attribute (attributes, "version")->value;
1330
1331 if (strcmp (version, "1.0") != 0)
1332 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1333 }
1334
1335 /* Parse a btrace "block" xml record. */
1336
1337 static void
1338 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1339 const struct gdb_xml_element *element,
1340 void *user_data, VEC (gdb_xml_value_s) *attributes)
1341 {
1342 struct btrace_data *btrace;
1343 struct btrace_block *block;
1344 ULONGEST *begin, *end;
1345
1346 btrace = user_data;
1347
1348 switch (btrace->format)
1349 {
1350 case BTRACE_FORMAT_BTS:
1351 break;
1352
1353 case BTRACE_FORMAT_NONE:
1354 btrace->format = BTRACE_FORMAT_BTS;
1355 btrace->variant.bts.blocks = NULL;
1356 break;
1357
1358 default:
1359 gdb_xml_error (parser, _("Btrace format error."));
1360 }
1361
1362 begin = xml_find_attribute (attributes, "begin")->value;
1363 end = xml_find_attribute (attributes, "end")->value;
1364
1365 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1366 block->begin = *begin;
1367 block->end = *end;
1368 }
1369
1370 /* Parse a "raw" xml record. */
1371
1372 static void
1373 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1374 gdb_byte **pdata, unsigned long *psize)
1375 {
1376 struct cleanup *cleanup;
1377 gdb_byte *data, *bin;
1378 unsigned long size;
1379 size_t len;
1380
1381 len = strlen (body_text);
1382 size = len / 2;
1383
1384 if ((size_t) size * 2 != len)
1385 gdb_xml_error (parser, _("Bad raw data size."));
1386
1387 bin = data = xmalloc (size);
1388 cleanup = make_cleanup (xfree, data);
1389
1390 /* We use hex encoding - see common/rsp-low.h. */
1391 while (len > 0)
1392 {
1393 char hi, lo;
1394
1395 hi = *body_text++;
1396 lo = *body_text++;
1397
1398 if (hi == 0 || lo == 0)
1399 gdb_xml_error (parser, _("Bad hex encoding."));
1400
1401 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1402 len -= 2;
1403 }
1404
1405 discard_cleanups (cleanup);
1406
1407 *pdata = data;
1408 *psize = size;
1409 }
1410
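/* For example, the body text "2af0" decodes into the two bytes 0x2a
   and 0xf0: each byte is encoded as two hex digits, high nibble
   first. */
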
1411 /* Parse a btrace pt-config "cpu" xml record. */
1412
1413 static void
1414 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1415 const struct gdb_xml_element *element,
1416 void *user_data,
1417 VEC (gdb_xml_value_s) *attributes)
1418 {
1419 struct btrace_data *btrace;
1420 const char *vendor;
1421 ULONGEST *family, *model, *stepping;
1422
1423 vendor = xml_find_attribute (attributes, "vendor")->value;
1424 family = xml_find_attribute (attributes, "family")->value;
1425 model = xml_find_attribute (attributes, "model")->value;
1426 stepping = xml_find_attribute (attributes, "stepping")->value;
1427
1428 btrace = user_data;
1429
1430 if (strcmp (vendor, "GenuineIntel") == 0)
1431 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1432
1433 btrace->variant.pt.config.cpu.family = *family;
1434 btrace->variant.pt.config.cpu.model = *model;
1435 btrace->variant.pt.config.cpu.stepping = *stepping;
1436 }
1437
1438 /* Parse a btrace pt "raw" xml record. */
1439
1440 static void
1441 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1442 const struct gdb_xml_element *element,
1443 void *user_data, const char *body_text)
1444 {
1445 struct btrace_data *btrace;
1446
1447 btrace = user_data;
1448 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1449 &btrace->variant.pt.size);
1450 }
1451
1452 /* Parse a btrace "pt" xml record. */
1453
1454 static void
1455 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1456 const struct gdb_xml_element *element,
1457 void *user_data, VEC (gdb_xml_value_s) *attributes)
1458 {
1459 struct btrace_data *btrace;
1460
1461 btrace = user_data;
1462 btrace->format = BTRACE_FORMAT_PT;
1463 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1464 btrace->variant.pt.data = NULL;
1465 btrace->variant.pt.size = 0;
1466 }
1467
1468 static const struct gdb_xml_attribute block_attributes[] = {
1469 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1470 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1471 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1472 };
1473
1474 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1475 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1476 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1477 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1478 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1479 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1480 };
1481
1482 static const struct gdb_xml_element btrace_pt_config_children[] = {
1483 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
1484 parse_xml_btrace_pt_config_cpu, NULL },
1485 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1486 };
1487
1488 static const struct gdb_xml_element btrace_pt_children[] = {
1489 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
1490 NULL },
1491 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
1492 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1493 };
1494
1495 static const struct gdb_xml_attribute btrace_attributes[] = {
1496 { "version", GDB_XML_AF_NONE, NULL, NULL },
1497 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1498 };
1499
1500 static const struct gdb_xml_element btrace_children[] = {
1501 { "block", block_attributes, NULL,
1502 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1503 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
1504 NULL },
1505 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1506 };
1507
1508 static const struct gdb_xml_element btrace_elements[] = {
1509 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1510 check_xml_btrace_version, NULL },
1511 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1512 };
1513
1514 #endif /* defined (HAVE_LIBEXPAT) */
1515
1516 /* See btrace.h. */
1517
1518 void
1519 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1520 {
1521 struct cleanup *cleanup;
1522 int errcode;
1523
1524 #if defined (HAVE_LIBEXPAT)
1525
1526 btrace->format = BTRACE_FORMAT_NONE;
1527
1528 cleanup = make_cleanup_btrace_data (btrace);
1529 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1530 buffer, btrace);
1531 if (errcode != 0)
1532 error (_("Error parsing branch trace."));
1533
1534 /* Keep parse results. */
1535 discard_cleanups (cleanup);
1536
1537 #else /* !defined (HAVE_LIBEXPAT) */
1538
1539 error (_("Cannot process branch trace. XML parsing is not supported."));
1540
1541 #endif /* !defined (HAVE_LIBEXPAT) */
1542 }
1543
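/* A minimal document accepted by this parser might look as follows
   (hypothetical addresses):

     <btrace version="1.0">
       <block begin="0x401000" end="0x401034"/>
       <block begin="0x400e10" end="0x400e5c"/>
     </btrace>

   For the Intel(R) Processor Trace format, a "pt" element with an
   optional "pt-config" child and a hex-encoded "raw" body replaces
   the "block" elements. */
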
1544 #if defined (HAVE_LIBEXPAT)
1545
1546 /* Parse a btrace-conf "bts" xml record. */
1547
1548 static void
1549 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1550 const struct gdb_xml_element *element,
1551 void *user_data, VEC (gdb_xml_value_s) *attributes)
1552 {
1553 struct btrace_config *conf;
1554 struct gdb_xml_value *size;
1555
1556 conf = user_data;
1557 conf->format = BTRACE_FORMAT_BTS;
1558 conf->bts.size = 0;
1559
1560 size = xml_find_attribute (attributes, "size");
1561 if (size != NULL)
1562 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
1563 }
1564
1565 /* Parse a btrace-conf "pt" xml record. */
1566
1567 static void
1568 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1569 const struct gdb_xml_element *element,
1570 void *user_data, VEC (gdb_xml_value_s) *attributes)
1571 {
1572 struct btrace_config *conf;
1573 struct gdb_xml_value *size;
1574
1575 conf = user_data;
1576 conf->format = BTRACE_FORMAT_PT;
1577 conf->pt.size = 0;
1578
1579 size = xml_find_attribute (attributes, "size");
1580 if (size != NULL)
1581 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1582 }
1583
1584 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1585 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1586 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1587 };
1588
1589 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1590 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1591 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1592 };
1593
1594 static const struct gdb_xml_element btrace_conf_children[] = {
1595 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1596 parse_xml_btrace_conf_bts, NULL },
1597 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1598 parse_xml_btrace_conf_pt, NULL },
1599 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1600 };
1601
1602 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1603 { "version", GDB_XML_AF_NONE, NULL, NULL },
1604 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1605 };
1606
1607 static const struct gdb_xml_element btrace_conf_elements[] = {
1608 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1609 GDB_XML_EF_NONE, NULL, NULL },
1610 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1611 };
1612
1613 #endif /* defined (HAVE_LIBEXPAT) */
1614
1615 /* See btrace.h. */
1616
1617 void
1618 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1619 {
1620 int errcode;
1621
1622 #if defined (HAVE_LIBEXPAT)
1623
1624 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1625 btrace_conf_elements, xml, conf);
1626 if (errcode != 0)
1627 error (_("Error parsing branch trace configuration."));
1628
1629 #else /* !defined (HAVE_LIBEXPAT) */
1630
1631 error (_("XML parsing is not supported."));
1632
1633 #endif /* !defined (HAVE_LIBEXPAT) */
1634 }
1635
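/* A hypothetical configuration document matching the grammar above:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   The optional "size" attribute requests the trace buffer size in
   bytes for the respective format. */
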
1636 /* See btrace.h. */
1637
1638 const struct btrace_insn *
1639 btrace_insn_get (const struct btrace_insn_iterator *it)
1640 {
1641 const struct btrace_function *bfun;
1642 unsigned int index, end;
1643
1644 index = it->index;
1645 bfun = it->function;
1646
1647 /* Check if the iterator points to a gap in the trace. */
1648 if (bfun->errcode != 0)
1649 return NULL;
1650
1651 /* The index is within the bounds of this function's instruction vector. */
1652 end = VEC_length (btrace_insn_s, bfun->insn);
1653 gdb_assert (0 < end);
1654 gdb_assert (index < end);
1655
1656 return VEC_index (btrace_insn_s, bfun->insn, index);
1657 }
1658
1659 /* See btrace.h. */
1660
1661 unsigned int
1662 btrace_insn_number (const struct btrace_insn_iterator *it)
1663 {
1664 const struct btrace_function *bfun;
1665
1666 bfun = it->function;
1667
1668 /* Return zero if the iterator points to a gap in the trace. */
1669 if (bfun->errcode != 0)
1670 return 0;
1671
1672 return bfun->insn_offset + it->index;
1673 }
1674
1675 /* See btrace.h. */
1676
1677 void
1678 btrace_insn_begin (struct btrace_insn_iterator *it,
1679 const struct btrace_thread_info *btinfo)
1680 {
1681 const struct btrace_function *bfun;
1682
1683 bfun = btinfo->begin;
1684 if (bfun == NULL)
1685 error (_("No trace."));
1686
1687 it->function = bfun;
1688 it->index = 0;
1689 }
1690
1691 /* See btrace.h. */
1692
1693 void
1694 btrace_insn_end (struct btrace_insn_iterator *it,
1695 const struct btrace_thread_info *btinfo)
1696 {
1697 const struct btrace_function *bfun;
1698 unsigned int length;
1699
1700 bfun = btinfo->end;
1701 if (bfun == NULL)
1702 error (_("No trace."));
1703
1704 length = VEC_length (btrace_insn_s, bfun->insn);
1705
1706 /* The last function may either be a gap or it contains the current
1707 instruction, which is one past the end of the execution trace; ignore
1708 it. */
1709 if (length > 0)
1710 length -= 1;
1711
1712 it->function = bfun;
1713 it->index = length;
1714 }
1715
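/* A sketch of the typical iteration pattern built from the iterator
   functions above (hypothetical caller; btrace_insn_get returns NULL
   for iterators that point into a gap):

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           consume (insn->pc);

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }

   where "consume" stands in for whatever the caller does per
   instruction. */
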
1716 /* See btrace.h. */
1717
1718 unsigned int
1719 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1720 {
1721 const struct btrace_function *bfun;
1722 unsigned int index, steps;
1723
1724 bfun = it->function;
1725 steps = 0;
1726 index = it->index;
1727
1728 while (stride != 0)
1729 {
1730 unsigned int end, space, adv;
1731
1732 end = VEC_length (btrace_insn_s, bfun->insn);
1733
1734 /* An empty function segment represents a gap in the trace. We count
1735 it as one instruction. */
1736 if (end == 0)
1737 {
1738 const struct btrace_function *next;
1739
1740 next = bfun->flow.next;
1741 if (next == NULL)
1742 break;
1743
1744 stride -= 1;
1745 steps += 1;
1746
1747 bfun = next;
1748 index = 0;
1749
1750 continue;
1751 }
1752
1753 gdb_assert (0 < end);
1754 gdb_assert (index < end);
1755
1756 /* Compute the number of instructions remaining in this segment. */
1757 space = end - index;
1758
1759 /* Advance the iterator as far as possible within this segment. */
1760 adv = min (space, stride);
1761 stride -= adv;
1762 index += adv;
1763 steps += adv;
1764
1765 /* Move to the next function if we're at the end of this one. */
1766 if (index == end)
1767 {
1768 const struct btrace_function *next;
1769
1770 next = bfun->flow.next;
1771 if (next == NULL)
1772 {
1773 /* We stepped past the last function.
1774
1775 Let's adjust the index to point to the last instruction in
1776 the previous function. */
1777 index -= 1;
1778 steps -= 1;
1779 break;
1780 }
1781
1782 /* We now point to the first instruction in the new function. */
1783 bfun = next;
1784 index = 0;
1785 }
1786
1787 /* We did make progress. */
1788 gdb_assert (adv > 0);
1789 }
1790
1791 /* Update the iterator. */
1792 it->function = bfun;
1793 it->index = index;
1794
1795 return steps;
1796 }
1797
1798 /* See btrace.h. */
1799
1800 unsigned int
1801 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1802 {
1803 const struct btrace_function *bfun;
1804 unsigned int index, steps;
1805
1806 bfun = it->function;
1807 steps = 0;
1808 index = it->index;
1809
1810 while (stride != 0)
1811 {
1812 unsigned int adv;
1813
1814 /* Move to the previous function if we're at the start of this one. */
1815 if (index == 0)
1816 {
1817 const struct btrace_function *prev;
1818
1819 prev = bfun->flow.prev;
1820 if (prev == NULL)
1821 break;
1822
1823 /* We point to one after the last instruction in the new function. */
1824 bfun = prev;
1825 index = VEC_length (btrace_insn_s, bfun->insn);
1826
1827 /* An empty function segment represents a gap in the trace. We count
1828 it as one instruction. */
1829 if (index == 0)
1830 {
1831 stride -= 1;
1832 steps += 1;
1833
1834 continue;
1835 }
1836 }
1837
1838 /* Advance the iterator as far as possible within this segment. */
1839 adv = min (index, stride);
1840
1841 stride -= adv;
1842 index -= adv;
1843 steps += adv;
1844
1845 /* We did make progress. */
1846 gdb_assert (adv > 0);
1847 }
1848
1849 /* Update the iterator. */
1850 it->function = bfun;
1851 it->index = index;
1852
1853 return steps;
1854 }
1855
1856 /* See btrace.h. */
1857
1858 int
1859 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1860 const struct btrace_insn_iterator *rhs)
1861 {
1862 unsigned int lnum, rnum;
1863
1864 lnum = btrace_insn_number (lhs);
1865 rnum = btrace_insn_number (rhs);
1866
1867 /* A gap has an instruction number of zero. Things get more
1868 complicated if gaps are involved.
1869
1870 We take the instruction number offset from the iterator's function.
1871 This is the number of the first instruction after the gap.
1872
1873 This is OK as long as both lhs and rhs point to gaps. If only one of
1874 them does, we need to adjust the number based on the other's regular
1875 instruction number. Otherwise, a gap might compare equal to an
1876 instruction. */
1877
1878 if (lnum == 0 && rnum == 0)
1879 {
1880 lnum = lhs->function->insn_offset;
1881 rnum = rhs->function->insn_offset;
1882 }
1883 else if (lnum == 0)
1884 {
1885 lnum = lhs->function->insn_offset;
1886
1887 if (lnum == rnum)
1888 lnum -= 1;
1889 }
1890 else if (rnum == 0)
1891 {
1892 rnum = rhs->function->insn_offset;
1893
1894 if (rnum == lnum)
1895 rnum -= 1;
1896 }
1897
1898 return (int) (lnum - rnum);
1899 }
1900
1901 /* See btrace.h. */
1902
1903 int
1904 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1905 const struct btrace_thread_info *btinfo,
1906 unsigned int number)
1907 {
1908 const struct btrace_function *bfun;
1909 unsigned int end, length;
1910
1911 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1912 {
1913 /* Skip gaps. */
1914 if (bfun->errcode != 0)
1915 continue;
1916
1917 if (bfun->insn_offset <= number)
1918 break;
1919 }
1920
1921 if (bfun == NULL)
1922 return 0;
1923
1924 length = VEC_length (btrace_insn_s, bfun->insn);
1925 gdb_assert (length > 0);
1926
1927 end = bfun->insn_offset + length;
1928 if (end <= number)
1929 return 0;
1930
1931 it->function = bfun;
1932 it->index = number - bfun->insn_offset;
1933
1934 return 1;
1935 }
1936
1937 /* See btrace.h. */
1938
1939 const struct btrace_function *
1940 btrace_call_get (const struct btrace_call_iterator *it)
1941 {
1942 return it->function;
1943 }
1944
1945 /* See btrace.h. */
1946
1947 unsigned int
1948 btrace_call_number (const struct btrace_call_iterator *it)
1949 {
1950 const struct btrace_thread_info *btinfo;
1951 const struct btrace_function *bfun;
1952 unsigned int insns;
1953
1954 btinfo = it->btinfo;
1955 bfun = it->function;
1956 if (bfun != NULL)
1957 return bfun->number;
1958
1959 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1960 number of the last function. */
1961 bfun = btinfo->end;
1962 insns = VEC_length (btrace_insn_s, bfun->insn);
1963
1964 /* If the function contains only a single instruction (i.e. the current
1965 instruction), it will be skipped and its number is already the number
1966 we seek. */
1967 if (insns == 1)
1968 return bfun->number;
1969
1970 /* Otherwise, return one more than the number of the last function. */
1971 return bfun->number + 1;
1972 }
1973
1974 /* See btrace.h. */
1975
1976 void
1977 btrace_call_begin (struct btrace_call_iterator *it,
1978 const struct btrace_thread_info *btinfo)
1979 {
1980 const struct btrace_function *bfun;
1981
1982 bfun = btinfo->begin;
1983 if (bfun == NULL)
1984 error (_("No trace."));
1985
1986 it->btinfo = btinfo;
1987 it->function = bfun;
1988 }
1989
1990 /* See btrace.h. */
1991
1992 void
1993 btrace_call_end (struct btrace_call_iterator *it,
1994 const struct btrace_thread_info *btinfo)
1995 {
1996 const struct btrace_function *bfun;
1997
1998 bfun = btinfo->end;
1999 if (bfun == NULL)
2000 error (_("No trace."));
2001
2002 it->btinfo = btinfo;
2003 it->function = NULL;
2004 }
2005
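/* The call iterator follows the same begin/end/next pattern as the
   instruction iterator above; a minimal sketch (hypothetical caller):

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         consume (bfun);

         if (btrace_call_next (&it, 1) == 0)
           break;
       }

   where "consume" stands in for the caller's per-function work. */
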
2006 /* See btrace.h. */
2007
2008 unsigned int
2009 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2010 {
2011 const struct btrace_function *bfun;
2012 unsigned int steps;
2013
2014 bfun = it->function;
2015 steps = 0;
2016 while (bfun != NULL)
2017 {
2018 const struct btrace_function *next;
2019 unsigned int insns;
2020
2021 next = bfun->flow.next;
2022 if (next == NULL)
2023 {
2024 /* Ignore the last function if it only contains a single
2025 (i.e. the current) instruction. */
2026 insns = VEC_length (btrace_insn_s, bfun->insn);
2027 if (insns == 1)
2028 steps -= 1;
2029 }
2030
2031 if (stride == steps)
2032 break;
2033
2034 bfun = next;
2035 steps += 1;
2036 }
2037
2038 it->function = bfun;
2039 return steps;
2040 }
2041
2042 /* See btrace.h. */
2043
2044 unsigned int
2045 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2046 {
2047 const struct btrace_thread_info *btinfo;
2048 const struct btrace_function *bfun;
2049 unsigned int steps;
2050
2051 bfun = it->function;
2052 steps = 0;
2053
2054 if (bfun == NULL)
2055 {
2056 unsigned int insns;
2057
2058 btinfo = it->btinfo;
2059 bfun = btinfo->end;
2060 if (bfun == NULL)
2061 return 0;
2062
2063 /* Ignore the last function if it only contains a single
2064 (i.e. the current) instruction. */
2065 insns = VEC_length (btrace_insn_s, bfun->insn);
2066 if (insns == 1)
2067 bfun = bfun->flow.prev;
2068
2069 if (bfun == NULL)
2070 return 0;
2071
2072 steps += 1;
2073 }
2074
2075 while (steps < stride)
2076 {
2077 const struct btrace_function *prev;
2078
2079 prev = bfun->flow.prev;
2080 if (prev == NULL)
2081 break;
2082
2083 bfun = prev;
2084 steps += 1;
2085 }
2086
2087 it->function = bfun;
2088 return steps;
2089 }
2090
2091 /* See btrace.h. */
2092
2093 int
2094 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2095 const struct btrace_call_iterator *rhs)
2096 {
2097 unsigned int lnum, rnum;
2098
2099 lnum = btrace_call_number (lhs);
2100 rnum = btrace_call_number (rhs);
2101
2102 return (int) (lnum - rnum);
2103 }
2104
2105 /* See btrace.h. */
2106
2107 int
2108 btrace_find_call_by_number (struct btrace_call_iterator *it,
2109 const struct btrace_thread_info *btinfo,
2110 unsigned int number)
2111 {
2112 const struct btrace_function *bfun;
2113
2114 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2115 {
2116 unsigned int bnum;
2117
2118 bnum = bfun->number;
2119 if (number == bnum)
2120 {
2121 it->btinfo = btinfo;
2122 it->function = bfun;
2123 return 1;
2124 }
2125
2126 /* Functions are ordered and numbered consecutively. We could bail out
2127 earlier. On the other hand, it is very unlikely that we search for
2128 a nonexistent function. */
2129 }
2130
2131 return 0;
2132 }
2133
2134 /* See btrace.h. */
2135
2136 void
2137 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2138 const struct btrace_insn_iterator *begin,
2139 const struct btrace_insn_iterator *end)
2140 {
2141 if (btinfo->insn_history == NULL)
2142 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
2143
2144 btinfo->insn_history->begin = *begin;
2145 btinfo->insn_history->end = *end;
2146 }
2147
2148 /* See btrace.h. */
2149
2150 void
2151 btrace_set_call_history (struct btrace_thread_info *btinfo,
2152 const struct btrace_call_iterator *begin,
2153 const struct btrace_call_iterator *end)
2154 {
2155 gdb_assert (begin->btinfo == end->btinfo);
2156
2157 if (btinfo->call_history == NULL)
2158 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
2159
2160 btinfo->call_history->begin = *begin;
2161 btinfo->call_history->end = *end;
2162 }
2163
2164 /* See btrace.h. */
2165
2166 int
2167 btrace_is_replaying (struct thread_info *tp)
2168 {
2169 return tp->btrace.replay != NULL;
2170 }
2171
2172 /* See btrace.h. */
2173
2174 int
2175 btrace_is_empty (struct thread_info *tp)
2176 {
2177 struct btrace_insn_iterator begin, end;
2178 struct btrace_thread_info *btinfo;
2179
2180 btinfo = &tp->btrace;
2181
2182 if (btinfo->begin == NULL)
2183 return 1;
2184
2185 btrace_insn_begin (&begin, btinfo);
2186 btrace_insn_end (&end, btinfo);
2187
2188 return btrace_insn_cmp (&begin, &end) == 0;
2189 }
2190
2191 /* Forward the cleanup request. */
2192
2193 static void
2194 do_btrace_data_cleanup (void *arg)
2195 {
2196 btrace_data_fini (arg);
2197 }
2198
2199 /* See btrace.h. */
2200
2201 struct cleanup *
2202 make_cleanup_btrace_data (struct btrace_data *data)
2203 {
2204 return make_cleanup (do_btrace_data_cleanup, data);
2205 }