/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = std::min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

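/* Translate a libipt instruction class into a btrace instruction class.  */
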
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

              /* Indicate trace overflows.  */
              if (insn.resynced)
                *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, end->level);

          btinsn.pc = (CORE_ADDR) insn.ip;
          btinsn.size = (gdb_byte) insn.size;
          btinsn.iclass = pt_reclassify_insn (insn.iclass);
          btinsn.flags = pt_btrace_insn_flags (&insn);

          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* If the gap is at the very beginning, we ignore it - we will have
         less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
        continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel Processor Trace near trace "
                 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;

      /* Remember that decoding failed so we can warn about possible
         gaps once we are done.  */
      nerrors += 1;
    }

  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
                     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
          btinfo->ngaps++;
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

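/* Stub that reports an internal error; used when GDB is built without
   Intel Processor Trace support.  */
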
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from the branch trace data BTRACE
   for the thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
     1. we executed the instruction and some branch brought us back.
     2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

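/* The element and attribute descriptions used for parsing "btrace"
   documents (see btrace.dtd).  */
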
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

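/* The element and attribute descriptions used for parsing "btrace-conf"
   documents (see btrace-conf.dtd).  */
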
static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We
             count it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

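  /* For example, if LHS points to a gap and the first instruction after
     that gap is number 42, while RHS points to instruction 42 itself, LNUM
     below becomes 42 and is then adjusted to 41, so the gap orders before
     the instruction.  */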
1949 if (lnum == 0 && rnum == 0)
1950 {
1951 lnum = lhs->function->insn_offset;
1952 rnum = rhs->function->insn_offset;
1953 }
1954 else if (lnum == 0)
1955 {
1956 lnum = lhs->function->insn_offset;
1957
1958 if (lnum == rnum)
1959 lnum -= 1;
1960 }
1961 else if (rnum == 0)
1962 {
1963 rnum = rhs->function->insn_offset;
1964
1965 if (rnum == lnum)
1966 rnum -= 1;
1967 }
1968
1969 return (int) (lnum - rnum);
1970 }
1971
1972 /* See btrace.h. */
1973
1974 int
1975 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1976 const struct btrace_thread_info *btinfo,
1977 unsigned int number)
1978 {
1979 const struct btrace_function *bfun;
1980 unsigned int end, length;
1981
1982 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1983 {
1984 /* Skip gaps. */
1985 if (bfun->errcode != 0)
1986 continue;
1987
1988 if (bfun->insn_offset <= number)
1989 break;
1990 }
1991
1992 if (bfun == NULL)
1993 return 0;
1994
1995 length = VEC_length (btrace_insn_s, bfun->insn);
1996 gdb_assert (length > 0);
1997
1998 end = bfun->insn_offset + length;
1999 if (end <= number)
2000 return 0;
2001
2002 it->function = bfun;
2003 it->index = number - bfun->insn_offset;
2004
2005 return 1;
2006 }
2007
2008 /* See btrace.h. */
2009
2010 const struct btrace_function *
2011 btrace_call_get (const struct btrace_call_iterator *it)
2012 {
2013 return it->function;
2014 }
2015
2016 /* See btrace.h. */
2017
2018 unsigned int
2019 btrace_call_number (const struct btrace_call_iterator *it)
2020 {
2021 const struct btrace_thread_info *btinfo;
2022 const struct btrace_function *bfun;
2023 unsigned int insns;
2024
2025 btinfo = it->btinfo;
2026 bfun = it->function;
2027 if (bfun != NULL)
2028 return bfun->number;
2029
2030 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2031 number of the last function. */
2032 bfun = btinfo->end;
2033 insns = VEC_length (btrace_insn_s, bfun->insn);
2034
2035 /* If the last function contains only a single instruction (i.e. the
2036 current instruction), the call iteration skips it, so its number is
2037 already the number we seek. */
2038 if (insns == 1)
2039 return bfun->number;
2040
2041 /* Otherwise, return one more than the number of the last function. */
2042 return bfun->number + 1;
2043 }
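
/* Worked example for the end-iterator case above (illustrative): if the
trace ends in function number 42 and that segment contains only the
current instruction, the end iterator reports number 42; otherwise it
reports 43, one past the last function. */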
2044
2045 /* See btrace.h. */
2046
2047 void
2048 btrace_call_begin (struct btrace_call_iterator *it,
2049 const struct btrace_thread_info *btinfo)
2050 {
2051 const struct btrace_function *bfun;
2052
2053 bfun = btinfo->begin;
2054 if (bfun == NULL)
2055 error (_("No trace."));
2056
2057 it->btinfo = btinfo;
2058 it->function = bfun;
2059 }
2060
2061 /* See btrace.h. */
2062
2063 void
2064 btrace_call_end (struct btrace_call_iterator *it,
2065 const struct btrace_thread_info *btinfo)
2066 {
2067 const struct btrace_function *bfun;
2068
2069 bfun = btinfo->end;
2070 if (bfun == NULL)
2071 error (_("No trace."));
2072
2073 it->btinfo = btinfo;
2074 it->function = NULL;
2075 }
2076
2077 /* See btrace.h. */
2078
2079 unsigned int
2080 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2081 {
2082 const struct btrace_function *bfun;
2083 unsigned int steps;
2084
2085 bfun = it->function;
2086 steps = 0;
2087 while (bfun != NULL)
2088 {
2089 const struct btrace_function *next;
2090 unsigned int insns;
2091
2092 next = bfun->flow.next;
2093 if (next == NULL)
2094 {
2095 /* Ignore the last function if it only contains a single
2096 (i.e. the current) instruction. */
2097 insns = VEC_length (btrace_insn_s, bfun->insn);
2098 if (insns == 1)
2099 steps -= 1;
2100 }
2101
2102 if (stride == steps)
2103 break;
2104
2105 bfun = next;
2106 steps += 1;
2107 }
2108
2109 it->function = bfun;
2110 return steps;
2111 }
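
/* Usage sketch (illustrative only): visit each function segment from the
start of the trace, assuming BTINFO holds valid trace data.

struct btrace_call_iterator it;

btrace_call_begin (&it, btinfo);
while (btrace_call_get (&it) != NULL)
{
...inspect the current function segment...
if (btrace_call_next (&it, 1) == 0)
break;
} */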
2112
2113 /* See btrace.h. */
2114
2115 unsigned int
2116 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2117 {
2118 const struct btrace_thread_info *btinfo;
2119 const struct btrace_function *bfun;
2120 unsigned int steps;
2121
2122 bfun = it->function;
2123 steps = 0;
2124
2125 if (bfun == NULL)
2126 {
2127 unsigned int insns;
2128
2129 btinfo = it->btinfo;
2130 bfun = btinfo->end;
2131 if (bfun == NULL)
2132 return 0;
2133
2134 /* Ignore the last function if it only contains a single
2135 (i.e. the current) instruction. */
2136 insns = VEC_length (btrace_insn_s, bfun->insn);
2137 if (insns == 1)
2138 bfun = bfun->flow.prev;
2139
2140 if (bfun == NULL)
2141 return 0;
2142
2143 steps += 1;
2144 }
2145
2146 while (steps < stride)
2147 {
2148 const struct btrace_function *prev;
2149
2150 prev = bfun->flow.prev;
2151 if (prev == NULL)
2152 break;
2153
2154 bfun = prev;
2155 steps += 1;
2156 }
2157
2158 it->function = bfun;
2159 return steps;
2160 }
2161
2162 /* See btrace.h. */
2163
2164 int
2165 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2166 const struct btrace_call_iterator *rhs)
2167 {
2168 unsigned int lnum, rnum;
2169
2170 lnum = btrace_call_number (lhs);
2171 rnum = btrace_call_number (rhs);
2172
2173 return (int) (lnum - rnum);
2174 }
2175
2176 /* See btrace.h. */
2177
2178 int
2179 btrace_find_call_by_number (struct btrace_call_iterator *it,
2180 const struct btrace_thread_info *btinfo,
2181 unsigned int number)
2182 {
2183 const struct btrace_function *bfun;
2184
2185 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2186 {
2187 unsigned int bnum;
2188
2189 bnum = bfun->number;
2190 if (number == bnum)
2191 {
2192 it->btinfo = btinfo;
2193 it->function = bfun;
2194 return 1;
2195 }
2196
2197 /* Functions are ordered and numbered consecutively. We could bail out
2198 as soon as BNUM drops below NUMBER. On the other hand, it is very
2199 unlikely that we would search for a nonexistent function. */
2200 }
2201
2202 return 0;
2203 }
2204
2205 /* See btrace.h. */
2206
2207 void
2208 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2209 const struct btrace_insn_iterator *begin,
2210 const struct btrace_insn_iterator *end)
2211 {
2212 if (btinfo->insn_history == NULL)
2213 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2214
2215 btinfo->insn_history->begin = *begin;
2216 btinfo->insn_history->end = *end;
2217 }
2218
2219 /* See btrace.h. */
2220
2221 void
2222 btrace_set_call_history (struct btrace_thread_info *btinfo,
2223 const struct btrace_call_iterator *begin,
2224 const struct btrace_call_iterator *end)
2225 {
2226 gdb_assert (begin->btinfo == end->btinfo);
2227
2228 if (btinfo->call_history == NULL)
2229 btinfo->call_history = XCNEW (struct btrace_call_history);
2230
2231 btinfo->call_history->begin = *begin;
2232 btinfo->call_history->end = *end;
2233 }
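
/* Usage sketch (illustrative only; FIRST and LAST are hypothetical call
numbers, e.g. parsed from user input): select a sub-range of the call
history.

struct btrace_call_iterator begin, end;

if (btrace_find_call_by_number (&begin, btinfo, first) == 0
|| btrace_find_call_by_number (&end, btinfo, last) == 0)
...range out of bounds...

btrace_set_call_history (btinfo, &begin, &end); */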
2234
2235 /* See btrace.h. */
2236
2237 int
2238 btrace_is_replaying (struct thread_info *tp)
2239 {
2240 return tp->btrace.replay != NULL;
2241 }
2242
2243 /* See btrace.h. */
2244
2245 int
2246 btrace_is_empty (struct thread_info *tp)
2247 {
2248 struct btrace_insn_iterator begin, end;
2249 struct btrace_thread_info *btinfo;
2250
2251 btinfo = &tp->btrace;
2252
2253 if (btinfo->begin == NULL)
2254 return 1;
2255
2256 btrace_insn_begin (&begin, btinfo);
2257 btrace_insn_end (&end, btinfo);
2258
2259 return btrace_insn_cmp (&begin, &end) == 0;
2260 }
2261
2262 /* Forward the cleanup request. */
2263
2264 static void
2265 do_btrace_data_cleanup (void *arg)
2266 {
2267 btrace_data_fini ((struct btrace_data *) arg);
2268 }
2269
2270 /* See btrace.h. */
2271
2272 struct cleanup *
2273 make_cleanup_btrace_data (struct btrace_data *data)
2274 {
2275 return make_cleanup (do_btrace_data_cleanup, data);
2276 }
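
/* Usage sketch (illustrative only, assuming the btrace_data_init
counterpart to btrace_data_fini): make sure a btrace_data object is
finalized even if processing it throws.

struct btrace_data data;
struct cleanup *cleanup;

btrace_data_init (&data);
cleanup = make_cleanup_btrace_data (&data);
...fill and process DATA, which may throw...
do_cleanups (cleanup); */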
2277
2278 #if defined (HAVE_LIBIPT)
2279
2280 /* Print a single packet. */
2281
2282 static void
2283 pt_print_packet (const struct pt_packet *packet)
2284 {
2285 switch (packet->type)
2286 {
2287 default:
2288 printf_unfiltered (("[??: %x]"), packet->type);
2289 break;
2290
2291 case ppt_psb:
2292 printf_unfiltered (("psb"));
2293 break;
2294
2295 case ppt_psbend:
2296 printf_unfiltered (("psbend"));
2297 break;
2298
2299 case ppt_pad:
2300 printf_unfiltered (("pad"));
2301 break;
2302
2303 case ppt_tip:
2304 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2305 packet->payload.ip.ipc,
2306 packet->payload.ip.ip);
2307 break;
2308
2309 case ppt_tip_pge:
2310 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2311 packet->payload.ip.ipc,
2312 packet->payload.ip.ip);
2313 break;
2314
2315 case ppt_tip_pgd:
2316 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2317 packet->payload.ip.ipc,
2318 packet->payload.ip.ip);
2319 break;
2320
2321 case ppt_fup:
2322 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2323 packet->payload.ip.ipc,
2324 packet->payload.ip.ip);
2325 break;
2326
2327 case ppt_tnt_8:
2328 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2329 packet->payload.tnt.bit_size,
2330 packet->payload.tnt.payload);
2331 break;
2332
2333 case ppt_tnt_64:
2334 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2335 packet->payload.tnt.bit_size,
2336 packet->payload.tnt.payload);
2337 break;
2338
2339 case ppt_pip:
2340 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2341 packet->payload.pip.nr ? (" nr") : (""));
2342 break;
2343
2344 case ppt_tsc:
2345 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2346 break;
2347
2348 case ppt_cbr:
2349 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2350 break;
2351
2352 case ppt_mode:
2353 switch (packet->payload.mode.leaf)
2354 {
2355 default:
2356 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2357 break;
2358
2359 case pt_mol_exec:
2360 printf_unfiltered (("mode.exec%s%s"),
2361 packet->payload.mode.bits.exec.csl
2362 ? (" cs.l") : (""),
2363 packet->payload.mode.bits.exec.csd
2364 ? (" cs.d") : (""));
2365 break;
2366
2367 case pt_mol_tsx:
2368 printf_unfiltered (("mode.tsx%s%s"),
2369 packet->payload.mode.bits.tsx.intx
2370 ? (" intx") : (""),
2371 packet->payload.mode.bits.tsx.abrt
2372 ? (" abrt") : (""));
2373 break;
2374 }
2375 break;
2376
2377 case ppt_ovf:
2378 printf_unfiltered (("ovf"));
2379 break;
2380
2381 case ppt_stop:
2382 printf_unfiltered (("stop"));
2383 break;
2384
2385 case ppt_vmcs:
2386 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2387 break;
2388
2389 case ppt_tma:
2390 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2391 packet->payload.tma.fc);
2392 break;
2393
2394 case ppt_mtc:
2395 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2396 break;
2397
2398 case ppt_cyc:
2399 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2400 break;
2401
2402 case ppt_mnt:
2403 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2404 break;
2405 }
2406 }
2407
2408 /* Decode packets into MAINT using DECODER. */
2409
2410 static void
2411 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2412 struct pt_packet_decoder *decoder)
2413 {
2414 int errcode;
2415
2416 for (;;)
2417 {
2418 struct btrace_pt_packet packet;
2419
2420 errcode = pt_pkt_sync_forward (decoder);
2421 if (errcode < 0)
2422 break;
2423
2424 for (;;)
2425 {
2426 pt_pkt_get_offset (decoder, &packet.offset);
2427
2428 errcode = pt_pkt_next (decoder, &packet.packet,
2429 sizeof (packet.packet));
2430 if (errcode < 0)
2431 break;
2432
2433 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2434 {
2435 packet.errcode = pt_errcode (errcode);
2436 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2437 &packet);
2438 }
2439 }
2440
2441 if (errcode == -pte_eos)
2442 break;
2443
2444 packet.errcode = pt_errcode (errcode);
2445 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2446 &packet);
2447
2448 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2449 packet.offset, pt_errstr (packet.errcode));
2450 }
2451
2452 if (errcode != -pte_eos)
2453 warning (_("Failed to synchronize onto the Intel Processor Trace "
2454 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2455 }
2456
2457 /* Update the packet history in BTINFO. */
2458
2459 static void
2460 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2461 {
2463 struct pt_packet_decoder *decoder;
2464 struct btrace_data_pt *pt;
2465 struct pt_config config;
2466 int errcode;
2467
2468 pt = &btinfo->data.variant.pt;
2469
2470 /* Nothing to do if there is no trace. */
2471 if (pt->size == 0)
2472 return;
2473
2474 memset (&config, 0, sizeof (config));
2475
2476 config.size = sizeof (config);
2477 config.begin = pt->data;
2478 config.end = pt->data + pt->size;
2479
2480 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2481 config.cpu.family = pt->config.cpu.family;
2482 config.cpu.model = pt->config.cpu.model;
2483 config.cpu.stepping = pt->config.cpu.stepping;
2484
2485 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2486 if (errcode < 0)
2487 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2488 pt_errstr (pt_errcode (errcode)));
2489
2490 decoder = pt_pkt_alloc_decoder (&config);
2491 if (decoder == NULL)
2492 error (_("Failed to allocate the Intel Processor Trace decoder."));
2493
2494 TRY
2495 {
2496 btrace_maint_decode_pt (&btinfo->maint, decoder);
2497 }
2498 CATCH (except, RETURN_MASK_ALL)
2499 {
2500 pt_pkt_free_decoder (decoder);
2501
2502 if (except.reason < 0)
2503 throw_exception (except);
2504 }
2505 END_CATCH
2506
2507 pt_pkt_free_decoder (decoder);
2508 }
2509
2510 #endif /* defined (HAVE_LIBIPT) */
2511
2512 /* Update the packet maintenance information for BTINFO and store the
2513 low and high bounds into BEGIN and END, respectively.
2514 Store the current iterator state into FROM and TO. */
2515
2516 static void
2517 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2518 unsigned int *begin, unsigned int *end,
2519 unsigned int *from, unsigned int *to)
2520 {
2521 switch (btinfo->data.format)
2522 {
2523 default:
2524 *begin = 0;
2525 *end = 0;
2526 *from = 0;
2527 *to = 0;
2528 break;
2529
2530 case BTRACE_FORMAT_BTS:
2531 /* Nothing to do - we operate directly on BTINFO->DATA. */
2532 *begin = 0;
2533 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2534 *from = btinfo->maint.variant.bts.packet_history.begin;
2535 *to = btinfo->maint.variant.bts.packet_history.end;
2536 break;
2537
2538 #if defined (HAVE_LIBIPT)
2539 case BTRACE_FORMAT_PT:
2540 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2541 btrace_maint_update_pt_packets (btinfo);
2542
2543 *begin = 0;
2544 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2545 *from = btinfo->maint.variant.pt.packet_history.begin;
2546 *to = btinfo->maint.variant.pt.packet_history.end;
2547 break;
2548 #endif /* defined (HAVE_LIBIPT) */
2549 }
2550 }
2551
2552 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2553 update the current iterator position. */
2554
2555 static void
2556 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2557 unsigned int begin, unsigned int end)
2558 {
2559 switch (btinfo->data.format)
2560 {
2561 default:
2562 break;
2563
2564 case BTRACE_FORMAT_BTS:
2565 {
2566 VEC (btrace_block_s) *blocks;
2567 unsigned int blk;
2568
2569 blocks = btinfo->data.variant.bts.blocks;
2570 for (blk = begin; blk < end; ++blk)
2571 {
2572 const btrace_block_s *block;
2573
2574 block = VEC_index (btrace_block_s, blocks, blk);
2575
2576 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2577 core_addr_to_string_nz (block->begin),
2578 core_addr_to_string_nz (block->end));
2579 }
2580
2581 btinfo->maint.variant.bts.packet_history.begin = begin;
2582 btinfo->maint.variant.bts.packet_history.end = end;
2583 }
2584 break;
2585
2586 #if defined (HAVE_LIBIPT)
2587 case BTRACE_FORMAT_PT:
2588 {
2589 VEC (btrace_pt_packet_s) *packets;
2590 unsigned int pkt;
2591
2592 packets = btinfo->maint.variant.pt.packets;
2593 for (pkt = begin; pkt < end; ++pkt)
2594 {
2595 const struct btrace_pt_packet *packet;
2596
2597 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2598
2599 printf_unfiltered ("%u\t", pkt);
2600 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2601
2602 if (packet->errcode == pte_ok)
2603 pt_print_packet (&packet->packet);
2604 else
2605 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2606
2607 printf_unfiltered ("\n");
2608 }
2609
2610 btinfo->maint.variant.pt.packet_history.begin = begin;
2611 btinfo->maint.variant.pt.packet_history.end = end;
2612 }
2613 break;
2614 #endif /* defined (HAVE_LIBIPT) */
2615 }
2616 }
2617
2618 /* Read a number from an argument string. */
2619
2620 static unsigned int
2621 get_uint (char **arg)
2622 {
2623 char *begin, *end, *pos;
2624 unsigned long number;
2625
2626 begin = *arg;
2627 pos = skip_spaces (begin);
2628
2629 if (!isdigit (*pos))
2630 error (_("Expected positive number, got: %s."), pos);
2631
2632 number = strtoul (pos, &end, 10);
2633 if (number > UINT_MAX)
2634 error (_("Number too big."));
2635
2636 *arg += (end - begin);
2637
2638 return (unsigned int) number;
2639 }
2640
2641 /* Read a context size from an argument string. */
2642
2643 static int
2644 get_context_size (char **arg)
2645 {
2646 char *pos;
2648
2649 pos = skip_spaces (*arg);
2650
2651 if (!isdigit (*pos))
2652 error (_("Expected positive number, got: %s."), pos);
2653
2654 return strtol (pos, arg, 10);
2655 }
2656
2657 /* Complain about junk at the end of an argument string. */
2658
2659 static void
2660 no_chunk (char *arg)
2661 {
2662 if (*arg != 0)
2663 error (_("Junk after argument: %s."), arg);
2664 }
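
/* Taken together, get_uint, get_context_size, and no_chunk parse the
packet-history arguments handled below. Illustrative examples of the
accepted forms:

"100" ten packets starting at packet 100
"100,110" packets 100 up to and including 110
"100,+5" five packets starting at packet 100
"100,-5" five packets ending at packet 100 */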
2665
2666 /* The "maintenance btrace packet-history" command. */
2667
2668 static void
2669 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2670 {
2671 struct btrace_thread_info *btinfo;
2672 struct thread_info *tp;
2673 unsigned int size, begin, end, from, to;
2674
2675 tp = find_thread_ptid (inferior_ptid);
2676 if (tp == NULL)
2677 error (_("No thread."));
2678
2679 size = 10;
2680 btinfo = &tp->btrace;
2681
2682 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2683 if (begin == end)
2684 {
2685 printf_unfiltered (_("No trace.\n"));
2686 return;
2687 }
2688
2689 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2690 {
2691 from = to;
2692
2693 if (end - from < size)
2694 size = end - from;
2695 to = from + size;
2696 }
2697 else if (strcmp (arg, "-") == 0)
2698 {
2699 to = from;
2700
2701 if (to - begin < size)
2702 size = to - begin;
2703 from = to - size;
2704 }
2705 else
2706 {
2707 from = get_uint (&arg);
2708 if (end <= from)
2709 error (_("'%u' is out of range."), from);
2710
2711 arg = skip_spaces (arg);
2712 if (*arg == ',')
2713 {
2714 arg = skip_spaces (++arg);
2715
2716 if (*arg == '+')
2717 {
2718 arg += 1;
2719 size = get_context_size (&arg);
2720
2721 no_chunk (arg);
2722
2723 if (end - from < size)
2724 size = end - from;
2725 to = from + size;
2726 }
2727 else if (*arg == '-')
2728 {
2729 arg += 1;
2730 size = get_context_size (&arg);
2731
2732 no_chunk (arg);
2733
2734 /* Include the packet given as the first argument. */
2735 from += 1;
2736 to = from;
2737
2738 if (to - begin < size)
2739 size = to - begin;
2740 from = to - size;
2741 }
2742 else
2743 {
2744 to = get_uint (&arg);
2745
2746 /* Include the packet given as the second argument and silently
2747 truncate the range at the end of the trace. */
2748 if (to < end)
2749 to += 1;
2750 else
2751 to = end;
2752
2753 no_chunk (arg);
2754 }
2755 }
2756 else
2757 {
2758 no_chunk (arg);
2759
2760 if (end - from < size)
2761 size = end - from;
2762 to = from + size;
2763 }
2764
2765 dont_repeat ();
2766 }
2767
2768 btrace_maint_print_packets (btinfo, from, to);
2769 }
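
/* Example session (illustrative):

(gdb) maintenance btrace packet-history first/next ten packets
(gdb) maintenance btrace packet-history - the previous ten packets
(gdb) maintenance btrace packet-history 100,+5 packets 100 to 104 */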
2770
2771 /* The "maintenance btrace clear-packet-history" command. */
2772
2773 static void
2774 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2775 {
2776 struct btrace_thread_info *btinfo;
2777 struct thread_info *tp;
2778
2779 if (args != NULL && *args != 0)
2780 error (_("Invalid argument."));
2781
2782 tp = find_thread_ptid (inferior_ptid);
2783 if (tp == NULL)
2784 error (_("No thread."));
2785
2786 btinfo = &tp->btrace;
2787
2788 /* Clear the maintenance data first - it depends on BTINFO->DATA. */
2789 btrace_maint_clear (btinfo);
2790 btrace_data_clear (&btinfo->data);
2791 }
2792
2793 /* The "maintenance btrace clear" command. */
2794
2795 static void
2796 maint_btrace_clear_cmd (char *args, int from_tty)
2797 {
2798 struct btrace_thread_info *btinfo;
2799 struct thread_info *tp;
2800
2801 if (args != NULL && *args != 0)
2802 error (_("Invalid argument."));
2803
2804 tp = find_thread_ptid (inferior_ptid);
2805 if (tp == NULL)
2806 error (_("No thread."));
2807
2808 btrace_clear (tp);
2809 }
2810
2811 /* The "maintenance btrace" command. */
2812
2813 static void
2814 maint_btrace_cmd (char *args, int from_tty)
2815 {
2816 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2817 gdb_stdout);
2818 }
2819
2820 /* The "maintenance set btrace" command. */
2821
2822 static void
2823 maint_btrace_set_cmd (char *args, int from_tty)
2824 {
2825 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2826 gdb_stdout);
2827 }
2828
2829 /* The "maintenance show btrace" command. */
2830
2831 static void
2832 maint_btrace_show_cmd (char *args, int from_tty)
2833 {
2834 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2835 all_commands, gdb_stdout);
2836 }
2837
2838 /* The "maintenance set btrace pt" command. */
2839
2840 static void
2841 maint_btrace_pt_set_cmd (char *args, int from_tty)
2842 {
2843 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2844 all_commands, gdb_stdout);
2845 }
2846
2847 /* The "maintenance show btrace pt" command. */
2848
2849 static void
2850 maint_btrace_pt_show_cmd (char *args, int from_tty)
2851 {
2852 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2853 all_commands, gdb_stdout);
2854 }
2855
2856 /* The "maintenance info btrace" command. */
2857
2858 static void
2859 maint_info_btrace_cmd (char *args, int from_tty)
2860 {
2861 struct btrace_thread_info *btinfo;
2862 struct thread_info *tp;
2863 const struct btrace_config *conf;
2864
2865 if (args != NULL && *args != 0)
2866 error (_("Invalid argument."));
2867
2868 tp = find_thread_ptid (inferior_ptid);
2869 if (tp == NULL)
2870 error (_("No thread."));
2871
2872 btinfo = &tp->btrace;
2873
2874 conf = btrace_conf (btinfo);
2875 if (conf == NULL)
2876 error (_("No btrace configuration."));
2877
2878 printf_unfiltered (_("Format: %s.\n"),
2879 btrace_format_string (conf->format));
2880
2881 switch (conf->format)
2882 {
2883 default:
2884 break;
2885
2886 case BTRACE_FORMAT_BTS:
2887 printf_unfiltered (_("Number of packets: %u.\n"),
2888 VEC_length (btrace_block_s,
2889 btinfo->data.variant.bts.blocks));
2890 break;
2891
2892 #if defined (HAVE_LIBIPT)
2893 case BTRACE_FORMAT_PT:
2894 {
2895 struct pt_version version;
2896
2897 version = pt_library_version ();
2898 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2899 version.minor, version.build,
2900 version.ext != NULL ? version.ext : "");
2901
2902 btrace_maint_update_pt_packets (btinfo);
2903 printf_unfiltered (_("Number of packets: %u.\n"),
2904 VEC_length (btrace_pt_packet_s,
2905 btinfo->maint.variant.pt.packets));
2906 }
2907 break;
2908 #endif /* defined (HAVE_LIBIPT) */
2909 }
2910 }
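
/* Example output (illustrative; the exact format name comes from
btrace_format_string):

(gdb) maintenance info btrace
Format: <format>.
Number of packets: 42. */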
2911
2912 /* The "maint show btrace pt skip-pad" show value function. */
2913
2914 static void
2915 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2916 struct cmd_list_element *c,
2917 const char *value)
2918 {
2919 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2920 }
2921
2922
2923 /* Initialize btrace maintenance commands. */
2924
2925 void _initialize_btrace (void);
2926 void
2927 _initialize_btrace (void)
2928 {
2929 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2930 _("Info about branch tracing data."), &maintenanceinfolist);
2931
2932 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2933 _("Branch tracing maintenance commands."),
2934 &maint_btrace_cmdlist, "maintenance btrace ",
2935 0, &maintenancelist);
2936
2937 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2938 Set branch tracing specific variables."),
2939 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2940 0, &maintenance_set_cmdlist);
2941
2942 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2943 Set Intel Processor Trace specific variables."),
2944 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2945 0, &maint_btrace_set_cmdlist);
2946
2947 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2948 Show branch tracing specific variables."),
2949 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2950 0, &maintenance_show_cmdlist);
2951
2952 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2953 Show Intel Processor Trace specific variables."),
2954 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2955 0, &maint_btrace_show_cmdlist);
2956
2957 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2958 &maint_btrace_pt_skip_pad, _("\
2959 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2960 Show whether PAD packets should be skipped in the btrace packet history."), _("\
2961 When enabled, PAD packets are ignored in the btrace packet history."),
2962 NULL, show_maint_btrace_pt_skip_pad,
2963 &maint_btrace_pt_set_cmdlist,
2964 &maint_btrace_pt_show_cmdlist);
2965
2966 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2967 _("Print the raw branch tracing data.\n\
2968 With no argument, print ten more packets after the previous ten-line print.\n\
2969 With '-' as argument print ten packets before a previous ten-line print.\n\
2970 One argument specifies the starting packet of a ten-line print.\n\
2971 Two arguments with comma between specify starting and ending packets to \
2972 print.\n\
2973 Preceded with '+'/'-' the second argument specifies the distance from the \
2974 first.\n"),
2975 &maint_btrace_cmdlist);
2976
2977 add_cmd ("clear-packet-history", class_maintenance,
2978 maint_btrace_clear_packet_history_cmd,
2979 _("Clears the branch tracing packet history.\n\
2980 Discards the raw branch tracing data but not the execution history data.\n\
2981 "),
2982 &maint_btrace_cmdlist);
2983
2984 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2985 _("Clears the branch tracing data.\n\
2986 Discards the raw branch tracing data and the execution history data.\n\
2987 The next 'record' command will fetch the branch tracing data anew.\n\
2988 "),
2989 &maint_btrace_cmdlist);
2990
2991 }