1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "gdbsupport/rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37 #include "gdbarch.h"
38
39 /* For maintenance commands. */
40 #include "record-btrace.h"
41
42 #include <inttypes.h>
43 #include <ctype.h>
44 #include <algorithm>
45
46 /* Command lists for btrace maintenance commands. */
47 static struct cmd_list_element *maint_btrace_cmdlist;
48 static struct cmd_list_element *maint_btrace_set_cmdlist;
49 static struct cmd_list_element *maint_btrace_show_cmdlist;
50 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
51 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
52
53 /* Control whether to skip PAD packets when computing the packet history. */
54 static bool maint_btrace_pt_skip_pad = true;
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
71
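/* Example use (as in btrace_enable below):

     DEBUG ("enable thread %s (%s)", print_thread_id (tp),
            target_pid_to_str (tp->ptid).c_str ());

   This writes a "[btrace] ..." line to gdb_stdlog whenever RECORD_DEBUG
   is non-zero.  */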
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return sym->print_name ();
86
87 if (msym != NULL)
88 return msym->print_name ();
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + bfun->insn.size ();
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
143
144 /* Return the number of instructions in a given function call segment. */
145
146 static unsigned int
147 ftrace_call_num_insn (const struct btrace_function *bfun)
148 {
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return bfun->insn.size ();
157 }
158
159 /* Return the function segment with the given NUMBER or NULL if no such segment
160 exists. BTINFO is the branch trace information for the current thread. */
161
162 static struct btrace_function *
163 ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
164 unsigned int number)
165 {
166 if (number == 0 || number > btinfo->functions.size ())
167 return NULL;
168
169 return &btinfo->functions[number - 1];
170 }
171
172 /* A const version of the function above. */
173
174 static const struct btrace_function *
175 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
176 unsigned int number)
177 {
178 if (number == 0 || number > btinfo->functions.size ())
179 return NULL;
180
181 return &btinfo->functions[number - 1];
182 }
183
184 /* Return non-zero if BFUN does not match MFUN and FUN,
185 return zero otherwise. */
186
187 static int
188 ftrace_function_switched (const struct btrace_function *bfun,
189 const struct minimal_symbol *mfun,
190 const struct symbol *fun)
191 {
192 struct minimal_symbol *msym;
193 struct symbol *sym;
194
195 msym = bfun->msym;
196 sym = bfun->sym;
197
198 /* If the minimal symbol changed, we certainly switched functions. */
199 if (mfun != NULL && msym != NULL
200 && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
201 return 1;
202
203 /* If the symbol changed, we certainly switched functions. */
204 if (fun != NULL && sym != NULL)
205 {
206 const char *bfname, *fname;
207
208 /* Check the function name. */
209 if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
210 return 1;
211
212 /* Check the location of those functions, as well. */
213 bfname = symtab_to_fullname (symbol_symtab (sym));
214 fname = symtab_to_fullname (symbol_symtab (fun));
215 if (filename_cmp (fname, bfname) != 0)
216 return 1;
217 }
218
219 /* If we lost symbol information, we switched functions. */
220 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
221 return 1;
222
223 /* If we gained symbol information, we switched functions. */
224 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
225 return 1;
226
227 return 0;
228 }
229
230 /* Allocate and initialize a new branch trace function segment at the end of
231 the trace.
232 BTINFO is the branch trace information for the current thread.
233 MFUN and FUN are the symbol information we have for this function.
234 This invalidates all struct btrace_function pointers currently held. */
235
236 static struct btrace_function *
237 ftrace_new_function (struct btrace_thread_info *btinfo,
238 struct minimal_symbol *mfun,
239 struct symbol *fun)
240 {
241 int level;
242 unsigned int number, insn_offset;
243
244 if (btinfo->functions.empty ())
245 {
246 /* Start counting NUMBER and INSN_OFFSET at one. */
247 level = 0;
248 number = 1;
249 insn_offset = 1;
250 }
251 else
252 {
253 const struct btrace_function *prev = &btinfo->functions.back ();
254 level = prev->level;
255 number = prev->number + 1;
256 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
257 }
258
259 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
260 return &btinfo->functions.back ();
261 }
262
263 /* Update the UP field of a function segment. */
264
265 static void
266 ftrace_update_caller (struct btrace_function *bfun,
267 struct btrace_function *caller,
268 enum btrace_function_flag flags)
269 {
270 if (bfun->up != 0)
271 ftrace_debug (bfun, "updating caller");
272
273 bfun->up = caller->number;
274 bfun->flags = flags;
275
276 ftrace_debug (bfun, "set caller");
277 ftrace_debug (caller, "..to");
278 }
279
280 /* Fix up the caller for all segments of a function. */
281
282 static void
283 ftrace_fixup_caller (struct btrace_thread_info *btinfo,
284 struct btrace_function *bfun,
285 struct btrace_function *caller,
286 enum btrace_function_flag flags)
287 {
288 unsigned int prev, next;
289
290 prev = bfun->prev;
291 next = bfun->next;
292 ftrace_update_caller (bfun, caller, flags);
293
294 /* Update all function segments belonging to the same function. */
295 for (; prev != 0; prev = bfun->prev)
296 {
297 bfun = ftrace_find_call_by_number (btinfo, prev);
298 ftrace_update_caller (bfun, caller, flags);
299 }
300
301 for (; next != 0; next = bfun->next)
302 {
303 bfun = ftrace_find_call_by_number (btinfo, next);
304 ftrace_update_caller (bfun, caller, flags);
305 }
306 }
307
308 /* Add a new function segment for a call at the end of the trace.
309 BTINFO is the branch trace information for the current thread.
310 MFUN and FUN are the symbol information we have for this function. */
311
312 static struct btrace_function *
313 ftrace_new_call (struct btrace_thread_info *btinfo,
314 struct minimal_symbol *mfun,
315 struct symbol *fun)
316 {
317 const unsigned int length = btinfo->functions.size ();
318 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
319
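/* Before the new segment was added above, LENGTH was the number of the
   chronologically last function segment.  Segment numbers are 1-based, so
   linking UP to LENGTH links the new segment to the segment that contains
   the call instruction.  */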
320 bfun->up = length;
321 bfun->level += 1;
322
323 ftrace_debug (bfun, "new call");
324
325 return bfun;
326 }
327
328 /* Add a new function segment for a tail call at the end of the trace.
329 BTINFO is the branch trace information for the current thread.
330 MFUN and FUN are the symbol information we have for this function. */
331
332 static struct btrace_function *
333 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
334 struct minimal_symbol *mfun,
335 struct symbol *fun)
336 {
337 const unsigned int length = btinfo->functions.size ();
338 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
339
340 bfun->up = length;
341 bfun->level += 1;
342 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
343
344 ftrace_debug (bfun, "new tail call");
345
346 return bfun;
347 }
348
349 /* Return the caller of BFUN or NULL if there is none. This function skips
350 tail calls in the call chain. BTINFO is the branch trace information for
351 the current thread. */
352 static struct btrace_function *
353 ftrace_get_caller (struct btrace_thread_info *btinfo,
354 struct btrace_function *bfun)
355 {
356 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
357 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
358 return ftrace_find_call_by_number (btinfo, bfun->up);
359
360 return NULL;
361 }
362
363 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
364 symbol information. BTINFO is the branch trace information for the current
365 thread. */
366
367 static struct btrace_function *
368 ftrace_find_caller (struct btrace_thread_info *btinfo,
369 struct btrace_function *bfun,
370 struct minimal_symbol *mfun,
371 struct symbol *fun)
372 {
373 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
374 {
375 /* Skip functions with incompatible symbol information. */
376 if (ftrace_function_switched (bfun, mfun, fun))
377 continue;
378
379 /* This is the function segment we're looking for. */
380 break;
381 }
382
383 return bfun;
384 }
385
386 /* Find the innermost caller in the back trace of BFUN, skipping all
387 function segments that do not end with a call instruction (e.g.
388 tail calls ending with a jump). BTINFO is the branch trace information for
389 the current thread. */
390
391 static struct btrace_function *
392 ftrace_find_call (struct btrace_thread_info *btinfo,
393 struct btrace_function *bfun)
394 {
395 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
396 {
397 /* Skip gaps. */
398 if (bfun->errcode != 0)
399 continue;
400
401 btrace_insn &last = bfun->insn.back ();
402
403 if (last.iclass == BTRACE_INSN_CALL)
404 break;
405 }
406
407 return bfun;
408 }
409
410 /* Add a continuation segment for a function into which we return at the end of
411 the trace.
412 BTINFO is the branch trace information for the current thread.
413 MFUN and FUN are the symbol information we have for this function. */
414
415 static struct btrace_function *
416 ftrace_new_return (struct btrace_thread_info *btinfo,
417 struct minimal_symbol *mfun,
418 struct symbol *fun)
419 {
420 struct btrace_function *prev, *bfun, *caller;
421
422 bfun = ftrace_new_function (btinfo, mfun, fun);
423 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
424
425 /* It is important to start at PREV's caller. Otherwise, we might find
426 PREV itself, if PREV is a recursive function. */
427 caller = ftrace_find_call_by_number (btinfo, prev->up);
428 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
429 if (caller != NULL)
430 {
431 /* The caller of PREV is the preceding btrace function segment in this
432 function instance. */
433 gdb_assert (caller->next == 0);
434
435 caller->next = bfun->number;
436 bfun->prev = caller->number;
437
438 /* Maintain the function level. */
439 bfun->level = caller->level;
440
441 /* Maintain the call stack. */
442 bfun->up = caller->up;
443 bfun->flags = caller->flags;
444
445 ftrace_debug (bfun, "new return");
446 }
447 else
448 {
449 /* We did not find a caller. This could mean that something went
450 wrong or that the call is simply not included in the trace. */
451
452 /* Let's search for some actual call. */
453 caller = ftrace_find_call_by_number (btinfo, prev->up);
454 caller = ftrace_find_call (btinfo, caller);
455 if (caller == NULL)
456 {
457 /* There is no call in PREV's back trace. We assume that the
458 branch trace did not include it. */
459
460 /* Let's find the topmost function and add a new caller for it.
461 This should handle a series of initial tail calls. */
462 while (prev->up != 0)
463 prev = ftrace_find_call_by_number (btinfo, prev->up);
464
465 bfun->level = prev->level - 1;
466
467 /* Fix up the call stack for PREV. */
468 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
469
470 ftrace_debug (bfun, "new return - no caller");
471 }
472 else
473 {
474 /* There is a call in PREV's back trace to which we should have
475 returned but didn't. Let's start a new, separate back trace
476 from PREV's level. */
477 bfun->level = prev->level - 1;
478
479 /* We fix up the back trace for PREV but leave other function segments
480 on the same level as they are.
481 This should handle things like schedule () correctly where we're
482 switching contexts. */
483 prev->up = bfun->number;
484 prev->flags = BFUN_UP_LINKS_TO_RET;
485
486 ftrace_debug (bfun, "new return - unknown caller");
487 }
488 }
489
490 return bfun;
491 }
492
493 /* Add a new function segment for a function switch at the end of the trace.
494 BTINFO is the branch trace information for the current thread.
495 MFUN and FUN are the symbol information we have for this function. */
496
497 static struct btrace_function *
498 ftrace_new_switch (struct btrace_thread_info *btinfo,
499 struct minimal_symbol *mfun,
500 struct symbol *fun)
501 {
502 struct btrace_function *prev, *bfun;
503
504 /* This is an unexplained function switch. We can't really be sure about the
505 call stack; preserving it is the best we can do for now. */
506 bfun = ftrace_new_function (btinfo, mfun, fun);
507 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
508 bfun->up = prev->up;
509 bfun->flags = prev->flags;
510
511 ftrace_debug (bfun, "new switch");
512
513 return bfun;
514 }
515
516 /* Add a new function segment for a gap in the trace due to a decode error at
517 the end of the trace.
518 BTINFO is the branch trace information for the current thread.
519 ERRCODE is the format-specific error code. */
520
521 static struct btrace_function *
522 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
523 std::vector<unsigned int> &gaps)
524 {
525 struct btrace_function *bfun;
526
527 if (btinfo->functions.empty ())
528 bfun = ftrace_new_function (btinfo, NULL, NULL);
529 else
530 {
531 /* We hijack the previous function segment if it was empty. */
532 bfun = &btinfo->functions.back ();
533 if (bfun->errcode != 0 || !bfun->insn.empty ())
534 bfun = ftrace_new_function (btinfo, NULL, NULL);
535 }
536
537 bfun->errcode = errcode;
538 gaps.push_back (bfun->number);
539
540 ftrace_debug (bfun, "new gap");
541
542 return bfun;
543 }
544
545 /* Update the current function segment at the end of the trace in BTINFO with
546 respect to the instruction at PC. This may create new function segments.
547 Return the chronologically latest function segment, never NULL. */
548
549 static struct btrace_function *
550 ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
551 {
552 struct bound_minimal_symbol bmfun;
553 struct minimal_symbol *mfun;
554 struct symbol *fun;
555 struct btrace_function *bfun;
556
557 /* Try to determine the function we're in. We use both types of symbols
558 to avoid surprises when we sometimes get a full symbol and sometimes
559 only a minimal symbol. */
560 fun = find_pc_function (pc);
561 bmfun = lookup_minimal_symbol_by_pc (pc);
562 mfun = bmfun.minsym;
563
564 if (fun == NULL && mfun == NULL)
565 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
566
567 /* If we didn't have a function, we create one. */
568 if (btinfo->functions.empty ())
569 return ftrace_new_function (btinfo, mfun, fun);
570
571 /* If we had a gap before, we create a function. */
572 bfun = &btinfo->functions.back ();
573 if (bfun->errcode != 0)
574 return ftrace_new_function (btinfo, mfun, fun);
575
576 /* Check the last instruction, if we have one.
577 We do this check first, since it allows us to fill in the call stack
578 links in addition to the normal flow links. */
579 btrace_insn *last = NULL;
580 if (!bfun->insn.empty ())
581 last = &bfun->insn.back ();
582
583 if (last != NULL)
584 {
585 switch (last->iclass)
586 {
587 case BTRACE_INSN_RETURN:
588 {
589 const char *fname;
590
591 /* On some systems, _dl_runtime_resolve returns to the resolved
592 function instead of jumping to it. From our perspective,
593 however, this is a tailcall.
594 If we treated it as return, we wouldn't be able to find the
595 resolved function in our stack back trace. Hence, we would
596 lose the current stack back trace and start anew with an empty
597 back trace. When the resolved function returns, we would then
598 create a stack back trace with the same function names but
599 different frame id's. This will confuse stepping. */
600 fname = ftrace_print_function_name (bfun);
601 if (strcmp (fname, "_dl_runtime_resolve") == 0)
602 return ftrace_new_tailcall (btinfo, mfun, fun);
603
604 return ftrace_new_return (btinfo, mfun, fun);
605 }
606
607 case BTRACE_INSN_CALL:
608 /* Ignore calls to the next instruction. They are used for PIC. */
609 if (last->pc + last->size == pc)
610 break;
611
612 return ftrace_new_call (btinfo, mfun, fun);
613
614 case BTRACE_INSN_JUMP:
615 {
616 CORE_ADDR start;
617
618 start = get_pc_function_start (pc);
619
620 /* A jump to the start of a function is (typically) a tail call. */
621 if (start == pc)
622 return ftrace_new_tailcall (btinfo, mfun, fun);
623
624 /* Some versions of _Unwind_RaiseException use an indirect
625 jump to 'return' to the exception handler of the caller
626 handling the exception instead of a return. Let's restrict
627 this heuristic to that and related functions. */
628 const char *fname = ftrace_print_function_name (bfun);
629 if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
630 {
631 struct btrace_function *caller
632 = ftrace_find_call_by_number (btinfo, bfun->up);
633 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
634 if (caller != NULL)
635 return ftrace_new_return (btinfo, mfun, fun);
636 }
637
638 /* If we can't determine the function for PC, we treat a jump at
639 the end of the block as a tail call if we're switching functions
640 and as an intra-function branch if we aren't. */
641 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
642 return ftrace_new_tailcall (btinfo, mfun, fun);
643
644 break;
645 }
646 }
647 }
648
649 /* Check if we're switching functions for some other reason. */
650 if (ftrace_function_switched (bfun, mfun, fun))
651 {
652 DEBUG_FTRACE ("switching from %s in %s at %s",
653 ftrace_print_insn_addr (last),
654 ftrace_print_function_name (bfun),
655 ftrace_print_filename (bfun));
656
657 return ftrace_new_switch (btinfo, mfun, fun);
658 }
659
660 return bfun;
661 }
662
663 /* Add the instruction INSN to BFUN's instructions. */
664
665 static void
666 ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
667 {
668 bfun->insn.push_back (insn);
669
670 if (record_debug > 1)
671 ftrace_debug (bfun, "update insn");
672 }
673
674 /* Classify the instruction at PC. */
675
676 static enum btrace_insn_class
677 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
678 {
679 enum btrace_insn_class iclass;
680
681 iclass = BTRACE_INSN_OTHER;
682 try
683 {
684 if (gdbarch_insn_is_call (gdbarch, pc))
685 iclass = BTRACE_INSN_CALL;
686 else if (gdbarch_insn_is_ret (gdbarch, pc))
687 iclass = BTRACE_INSN_RETURN;
688 else if (gdbarch_insn_is_jump (gdbarch, pc))
689 iclass = BTRACE_INSN_JUMP;
690 }
691 catch (const gdb_exception_error &error)
692 {
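/* If we cannot classify the instruction (e.g. because its memory
   cannot be read), leave it as BTRACE_INSN_OTHER.  */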
693 }
694
695 return iclass;
696 }
697
698 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
699 number of matching function segments or zero if the back traces do not
700 match. BTINFO is the branch trace information for the current thread. */
701
702 static int
703 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
704 struct btrace_function *lhs,
705 struct btrace_function *rhs)
706 {
707 int matches;
708
709 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
710 {
711 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
712 return 0;
713
714 lhs = ftrace_get_caller (btinfo, lhs);
715 rhs = ftrace_get_caller (btinfo, rhs);
716 }
717
718 return matches;
719 }
720
721 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
722 BTINFO is the branch trace information for the current thread. */
723
724 static void
725 ftrace_fixup_level (struct btrace_thread_info *btinfo,
726 struct btrace_function *bfun, int adjustment)
727 {
728 if (adjustment == 0)
729 return;
730
731 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
732 ftrace_debug (bfun, "..bfun");
733
734 while (bfun != NULL)
735 {
736 bfun->level += adjustment;
737 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
738 }
739 }
740
741 /* Recompute the global level offset. Traverse the function trace and compute
742 the global level offset as the negative of the minimal function level. */
743
744 static void
745 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
746 {
747 int level = INT_MAX;
748
749 if (btinfo == NULL)
750 return;
751
752 if (btinfo->functions.empty ())
753 return;
754
755 unsigned int length = btinfo->functions.size () - 1;
756 for (unsigned int i = 0; i < length; ++i)
757 level = std::min (level, btinfo->functions[i].level);
758
759 /* The last function segment contains the current instruction, which is not
760 really part of the trace. If it contains just this one instruction, we
761 ignore the segment. */
762 struct btrace_function *last = &btinfo->functions.back ();
763 if (last->insn.size () != 1)
764 level = std::min (level, last->level);
765
766 DEBUG_FTRACE ("setting global level offset: %d", -level);
767 btinfo->level = -level;
768 }
769
770 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
771 ftrace_connect_backtrace. BTINFO is the branch trace information for the
772 current thread. */
773
774 static void
775 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
776 struct btrace_function *prev,
777 struct btrace_function *next)
778 {
779 DEBUG_FTRACE ("connecting...");
780 ftrace_debug (prev, "..prev");
781 ftrace_debug (next, "..next");
782
783 /* The function segments are not yet connected. */
784 gdb_assert (prev->next == 0);
785 gdb_assert (next->prev == 0);
786
787 prev->next = next->number;
788 next->prev = prev->number;
789
790 /* We may have moved NEXT to a different function level. */
791 ftrace_fixup_level (btinfo, next, prev->level - next->level);
792
793 /* If we run out of back trace for one, let's use the other's. */
794 if (prev->up == 0)
795 {
796 const btrace_function_flags flags = next->flags;
797
798 next = ftrace_find_call_by_number (btinfo, next->up);
799 if (next != NULL)
800 {
801 DEBUG_FTRACE ("using next's callers");
802 ftrace_fixup_caller (btinfo, prev, next, flags);
803 }
804 }
805 else if (next->up == 0)
806 {
807 const btrace_function_flags flags = prev->flags;
808
809 prev = ftrace_find_call_by_number (btinfo, prev->up);
810 if (prev != NULL)
811 {
812 DEBUG_FTRACE ("using prev's callers");
813 ftrace_fixup_caller (btinfo, next, prev, flags);
814 }
815 }
816 else
817 {
818 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
819 link to add the tail callers to NEXT's back trace.
820
821 This removes NEXT->UP from NEXT's back trace. It will be added back
822 when connecting NEXT and PREV's callers - provided they exist.
823
824 If PREV's back trace consists of a series of tail calls without an
825 actual call, there will be no further connection and NEXT's caller will
826 be removed for good. To catch this case, we handle it here and connect
827 the top of PREV's back trace to NEXT's caller. */
828 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
829 {
830 struct btrace_function *caller;
831 btrace_function_flags next_flags, prev_flags;
832
833 /* We checked NEXT->UP above so CALLER can't be NULL. */
834 caller = ftrace_find_call_by_number (btinfo, next->up);
835 next_flags = next->flags;
836 prev_flags = prev->flags;
837
838 DEBUG_FTRACE ("adding prev's tail calls to next");
839
840 prev = ftrace_find_call_by_number (btinfo, prev->up);
841 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
842
843 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
844 prev->up))
845 {
846 /* At the end of PREV's back trace, continue with CALLER. */
847 if (prev->up == 0)
848 {
849 DEBUG_FTRACE ("fixing up link for tailcall chain");
850 ftrace_debug (prev, "..top");
851 ftrace_debug (caller, "..up");
852
853 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
854
855 /* If we skipped any tail calls, this may move CALLER to a
856 different function level.
857
858 Note that changing CALLER's level is only OK because we
859 know that this is the last iteration of the bottom-to-top
860 walk in ftrace_connect_backtrace.
861
862 Otherwise we will fix up CALLER's level when we connect it
863 to PREV's caller in the next iteration. */
864 ftrace_fixup_level (btinfo, caller,
865 prev->level - caller->level - 1);
866 break;
867 }
868
869 /* There's nothing to do if we find a real call. */
870 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
871 {
872 DEBUG_FTRACE ("will fix up link in next iteration");
873 break;
874 }
875 }
876 }
877 }
878 }
879
880 /* Connect function segments on the same level in the back trace at LHS and RHS.
881 The back traces at LHS and RHS are expected to match according to
882 ftrace_match_backtrace. BTINFO is the branch trace information for the
883 current thread. */
884
885 static void
886 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
887 struct btrace_function *lhs,
888 struct btrace_function *rhs)
889 {
890 while (lhs != NULL && rhs != NULL)
891 {
892 struct btrace_function *prev, *next;
893
894 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
895
896 /* Connecting LHS and RHS may change the up link. */
897 prev = lhs;
898 next = rhs;
899
900 lhs = ftrace_get_caller (btinfo, lhs);
901 rhs = ftrace_get_caller (btinfo, rhs);
902
903 ftrace_connect_bfun (btinfo, prev, next);
904 }
905 }
906
907 /* Bridge the gap between two function segments left and right of a gap if their
908 respective back traces match in at least MIN_MATCHES functions. BTINFO is
909 the branch trace information for the current thread.
910
911 Returns non-zero if the gap could be bridged, zero otherwise. */
912
913 static int
914 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
915 struct btrace_function *lhs, struct btrace_function *rhs,
916 int min_matches)
917 {
918 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
919 int best_matches;
920
921 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
922 rhs->insn_offset - 1, min_matches);
923
924 best_matches = 0;
925 best_l = NULL;
926 best_r = NULL;
927
928 /* We search the back traces of LHS and RHS for valid connections and connect
929 the two function segments that give the longest combined back trace. */
930
931 for (cand_l = lhs; cand_l != NULL;
932 cand_l = ftrace_get_caller (btinfo, cand_l))
933 for (cand_r = rhs; cand_r != NULL;
934 cand_r = ftrace_get_caller (btinfo, cand_r))
935 {
936 int matches;
937
938 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
939 if (best_matches < matches)
940 {
941 best_matches = matches;
942 best_l = cand_l;
943 best_r = cand_r;
944 }
945 }
946
947 /* We need at least MIN_MATCHES matches. */
948 gdb_assert (min_matches > 0);
949 if (best_matches < min_matches)
950 return 0;
951
952 DEBUG_FTRACE ("..matches: %d", best_matches);
953
954 /* We will fix up the level of BEST_R and succeeding function segments such
955 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
956
957 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
958 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
959
960 To catch this, we already fix up the level here where we can start at RHS
961 instead of at BEST_R. We will ignore the level fixup when connecting
962 BEST_L to BEST_R as they will already be on the same level. */
963 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
964
965 ftrace_connect_backtrace (btinfo, best_l, best_r);
966
967 return best_matches;
968 }
969
970 /* Try to bridge gaps due to overflow or decode errors by connecting the
971 function segments that are separated by the gap. */
972
973 static void
974 btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
975 {
976 struct btrace_thread_info *btinfo = &tp->btrace;
977 std::vector<unsigned int> remaining;
978 int min_matches;
979
980 DEBUG ("bridge gaps");
981
982 /* We require a minimum amount of matches for bridging a gap. The number of
983 required matches will be lowered with each iteration.
984
985 The more matches, the higher our confidence that the bridging is correct.
986 For big gaps or small traces, however, it may not be feasible to require a
987 high number of matches. */
988 for (min_matches = 5; min_matches > 0; --min_matches)
989 {
990 /* Let's try to bridge as many gaps as we can. In some cases, we need to
991 skip a gap and revisit it after we have closed later gaps. */
992 while (!gaps.empty ())
993 {
994 for (const unsigned int number : gaps)
995 {
996 struct btrace_function *gap, *lhs, *rhs;
997 int bridged;
998
999 gap = ftrace_find_call_by_number (btinfo, number);
1000
1001 /* We may have a sequence of gaps if we run from one error into
1002 the next as we try to re-sync onto the trace stream. Ignore
1003 all but the leftmost gap in such a sequence.
1004
1005 Also ignore gaps at the beginning of the trace. */
1006 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
1007 if (lhs == NULL || lhs->errcode != 0)
1008 continue;
1009
1010 /* Skip gaps to the right. */
1011 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
1012 while (rhs != NULL && rhs->errcode != 0)
1013 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1014
1015 /* Ignore gaps at the end of the trace. */
1016 if (rhs == NULL)
1017 continue;
1018
1019 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1020
1021 /* Keep track of gaps we were not able to bridge and try again.
1022 If we just pushed them to the end of GAPS we would risk an
1023 infinite loop in case we simply cannot bridge a gap. */
1024 if (bridged == 0)
1025 remaining.push_back (number);
1026 }
1027
1028 /* Let's see if we made any progress. */
1029 if (remaining.size () == gaps.size ())
1030 break;
1031
1032 gaps.clear ();
1033 gaps.swap (remaining);
1034 }
1035
1036 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1037 if (gaps.empty ())
1038 break;
1039
1040 remaining.clear ();
1041 }
1042
1043 /* We may omit this in some cases. Not sure it is worth the extra
1044 complication, though. */
1045 ftrace_compute_global_level_offset (btinfo);
1046 }
1047
1048 /* Compute the function branch trace from BTS trace. */
1049
1050 static void
1051 btrace_compute_ftrace_bts (struct thread_info *tp,
1052 const struct btrace_data_bts *btrace,
1053 std::vector<unsigned int> &gaps)
1054 {
1055 struct btrace_thread_info *btinfo;
1056 struct gdbarch *gdbarch;
1057 unsigned int blk;
1058 int level;
1059
1060 gdbarch = target_gdbarch ();
1061 btinfo = &tp->btrace;
1062 blk = btrace->blocks->size ();
1063
1064 if (btinfo->functions.empty ())
1065 level = INT_MAX;
1066 else
1067 level = -btinfo->level;
1068
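/* The BTS block vector stores the most recent block first (see also
   btrace_stitch_bts), so walking BLK from the back of the vector down to
   zero processes the trace in chronological order.  */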
1069 while (blk != 0)
1070 {
1071 CORE_ADDR pc;
1072
1073 blk -= 1;
1074
1075 const btrace_block &block = btrace->blocks->at (blk);
1076 pc = block.begin;
1077
1078 for (;;)
1079 {
1080 struct btrace_function *bfun;
1081 struct btrace_insn insn;
1082 int size;
1083
1084 /* We should hit the end of the block. Warn if we went too far. */
1085 if (block.end < pc)
1086 {
1087 /* Indicate the gap in the trace. */
1088 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1089
1090 warning (_("Recorded trace may be corrupted at instruction "
1091 "%u (pc = %s)."), bfun->insn_offset - 1,
1092 core_addr_to_string_nz (pc));
1093
1094 break;
1095 }
1096
1097 bfun = ftrace_update_function (btinfo, pc);
1098
1099 /* Maintain the function level offset.
1100 For all but the last block, we do it here. */
1101 if (blk != 0)
1102 level = std::min (level, bfun->level);
1103
1104 size = 0;
1105 try
1106 {
1107 size = gdb_insn_length (gdbarch, pc);
1108 }
1109 catch (const gdb_exception_error &error)
1110 {
1111 }
1112
1113 insn.pc = pc;
1114 insn.size = size;
1115 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1116 insn.flags = 0;
1117
1118 ftrace_update_insns (bfun, insn);
1119
1120 /* We're done once we pushed the instruction at the end. */
1121 if (block.end == pc)
1122 break;
1123
1124 /* We can't continue if we fail to compute the size. */
1125 if (size <= 0)
1126 {
1127 /* Indicate the gap in the trace. We just added INSN so we're
1128 not at the beginning. */
1129 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1130
1131 warning (_("Recorded trace may be incomplete at instruction %u "
1132 "(pc = %s)."), bfun->insn_offset - 1,
1133 core_addr_to_string_nz (pc));
1134
1135 break;
1136 }
1137
1138 pc += size;
1139
1140 /* Maintain the function level offset.
1141 For the last block, we do it here to not consider the last
1142 instruction.
1143 Since the last instruction corresponds to the current instruction
1144 and is not really part of the execution history, it shouldn't
1145 affect the level. */
1146 if (blk == 0)
1147 level = std::min (level, bfun->level);
1148 }
1149 }
1150
1151 /* LEVEL is the minimal function level of all btrace function segments.
1152 Define the global level offset to -LEVEL so all function levels are
1153 normalized to start at zero. */
1154 btinfo->level = -level;
1155 }
1156
1157 #if defined (HAVE_LIBIPT)
1158
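/* Translate a libipt instruction class to the corresponding btrace
   instruction class.  */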
1159 static enum btrace_insn_class
1160 pt_reclassify_insn (enum pt_insn_class iclass)
1161 {
1162 switch (iclass)
1163 {
1164 case ptic_call:
1165 return BTRACE_INSN_CALL;
1166
1167 case ptic_return:
1168 return BTRACE_INSN_RETURN;
1169
1170 case ptic_jump:
1171 return BTRACE_INSN_JUMP;
1172
1173 default:
1174 return BTRACE_INSN_OTHER;
1175 }
1176 }
1177
1178 /* Return the btrace instruction flags for INSN. */
1179
1180 static btrace_insn_flags
1181 pt_btrace_insn_flags (const struct pt_insn &insn)
1182 {
1183 btrace_insn_flags flags = 0;
1184
1185 if (insn.speculative)
1186 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1187
1188 return flags;
1189 }
1190
1191 /* Return the btrace instruction for INSN. */
1192
1193 static btrace_insn
1194 pt_btrace_insn (const struct pt_insn &insn)
1195 {
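/* Aggregate-initialize the btrace_insn members in declaration order:
   pc, size, iclass, flags.  */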
1196 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1197 pt_reclassify_insn (insn.iclass),
1198 pt_btrace_insn_flags (insn)};
1199 }
1200
1201 /* Handle instruction decode events (libipt-v2). */
1202
1203 static int
1204 handle_pt_insn_events (struct btrace_thread_info *btinfo,
1205 struct pt_insn_decoder *decoder,
1206 std::vector<unsigned int> &gaps, int status)
1207 {
1208 #if defined (HAVE_PT_INSN_EVENT)
1209 while (status & pts_event_pending)
1210 {
1211 struct btrace_function *bfun;
1212 struct pt_event event;
1213 uint64_t offset;
1214
1215 status = pt_insn_event (decoder, &event, sizeof (event));
1216 if (status < 0)
1217 break;
1218
1219 switch (event.type)
1220 {
1221 default:
1222 break;
1223
1224 case ptev_enabled:
1225 if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
1226 {
1227 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1228
1229 pt_insn_get_offset (decoder, &offset);
1230
1231 warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
1232 PRIx64 ")."), bfun->insn_offset - 1, offset);
1233 }
1234
1235 break;
1236
1237 case ptev_overflow:
1238 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1239
1240 pt_insn_get_offset (decoder, &offset);
1241
1242 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
1243 bfun->insn_offset - 1, offset);
1244
1245 break;
1246 }
1247 }
1248 #endif /* defined (HAVE_PT_INSN_EVENT) */
1249
1250 return status;
1251 }
1252
1253 /* Handle events indicated by flags in INSN (libipt-v1). */
1254
1255 static void
1256 handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
1257 struct pt_insn_decoder *decoder,
1258 const struct pt_insn &insn,
1259 std::vector<unsigned int> &gaps)
1260 {
1261 #if defined (HAVE_STRUCT_PT_INSN_ENABLED)
1262 /* Tracing is disabled and re-enabled each time we enter the kernel. Most
1263 times, we continue from the same instruction we stopped before. This is
1264 indicated via the RESUMED instruction flag. The ENABLED instruction flag
1265 means that we continued from some other instruction. Indicate this as a
1266 trace gap except when tracing just started. */
1267 if (insn.enabled && !btinfo->functions.empty ())
1268 {
1269 struct btrace_function *bfun;
1270 uint64_t offset;
1271
1272 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1273
1274 pt_insn_get_offset (decoder, &offset);
1275
1276 warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
1277 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
1278 insn.ip);
1279 }
1280 #endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
1281
1282 #if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
1283 /* Indicate trace overflows. */
1284 if (insn.resynced)
1285 {
1286 struct btrace_function *bfun;
1287 uint64_t offset;
1288
1289 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1290
1291 pt_insn_get_offset (decoder, &offset);
1292
1293 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
1294 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
1295 }
1296 #endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
1297 }
1298
1299 /* Add function branch trace to BTINFO using DECODER. */
1300
1301 static void
1302 ftrace_add_pt (struct btrace_thread_info *btinfo,
1303 struct pt_insn_decoder *decoder,
1304 int *plevel,
1305 std::vector<unsigned int> &gaps)
1306 {
1307 struct btrace_function *bfun;
1308 uint64_t offset;
1309 int status;
1310
1311 for (;;)
1312 {
1313 struct pt_insn insn;
1314
1315 status = pt_insn_sync_forward (decoder);
1316 if (status < 0)
1317 {
1318 if (status != -pte_eos)
1319 warning (_("Failed to synchronize onto the Intel Processor "
1320 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
1321 break;
1322 }
1323
1324 for (;;)
1325 {
1326 /* Handle events from the previous iteration or synchronization. */
1327 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1328 if (status < 0)
1329 break;
1330
1331 status = pt_insn_next (decoder, &insn, sizeof (insn));
1332 if (status < 0)
1333 break;
1334
1335 /* Handle events indicated by flags in INSN. */
1336 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
1337
1338 bfun = ftrace_update_function (btinfo, insn.ip);
1339
1340 /* Maintain the function level offset. */
1341 *plevel = std::min (*plevel, bfun->level);
1342
1343 ftrace_update_insns (bfun, pt_btrace_insn (insn));
1344 }
1345
1346 if (status == -pte_eos)
1347 break;
1348
1349 /* Indicate the gap in the trace. */
1350 bfun = ftrace_new_gap (btinfo, status, gaps);
1351
1352 pt_insn_get_offset (decoder, &offset);
1353
1354 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1355 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1356 offset, insn.ip, pt_errstr (pt_errcode (status)));
1357 }
1358 }
1359
1360 /* A callback function to allow the trace decoder to read the inferior's
1361 memory. */
1362
1363 static int
1364 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1365 const struct pt_asid *asid, uint64_t pc,
1366 void *context)
1367 {
1368 int result, errcode;
1369
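/* Report the full SIZE as read on success and -pte_nomap if the
   inferior's memory cannot be read.  */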
1370 result = (int) size;
1371 try
1372 {
1373 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1374 if (errcode != 0)
1375 result = -pte_nomap;
1376 }
1377 catch (const gdb_exception_error &error)
1378 {
1379 result = -pte_nomap;
1380 }
1381
1382 return result;
1383 }
1384
1385 /* Translate the vendor from one enum to another. */
1386
1387 static enum pt_cpu_vendor
1388 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1389 {
1390 switch (vendor)
1391 {
1392 default:
1393 return pcv_unknown;
1394
1395 case CV_INTEL:
1396 return pcv_intel;
1397 }
1398 }
1399
1400 /* Finalize the function branch trace after decode. */
1401
1402 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1403 struct thread_info *tp, int level)
1404 {
1405 pt_insn_free_decoder (decoder);
1406
1407 /* LEVEL is the minimal function level of all btrace function segments.
1408 Define the global level offset to -LEVEL so all function levels are
1409 normalized to start at zero. */
1410 tp->btrace.level = -level;
1411
1412 /* Add a single last instruction entry for the current PC.
1413 This allows us to compute the backtrace at the current PC using both
1414 standard unwind and btrace unwind.
1415 This extra entry is ignored by all record commands. */
1416 btrace_add_pc (tp);
1417 }
1418
1419 /* Compute the function branch trace from Intel Processor Trace
1420 format. */
1421
1422 static void
1423 btrace_compute_ftrace_pt (struct thread_info *tp,
1424 const struct btrace_data_pt *btrace,
1425 std::vector<unsigned int> &gaps)
1426 {
1427 struct btrace_thread_info *btinfo;
1428 struct pt_insn_decoder *decoder;
1429 struct pt_config config;
1430 int level, errcode;
1431
1432 if (btrace->size == 0)
1433 return;
1434
1435 btinfo = &tp->btrace;
1436 if (btinfo->functions.empty ())
1437 level = INT_MAX;
1438 else
1439 level = -btinfo->level;
1440
1441 pt_config_init (&config);
1442 config.begin = btrace->data;
1443 config.end = btrace->data + btrace->size;
1444
1445 /* We treat an unknown vendor as 'no errata'. */
1446 if (btrace->config.cpu.vendor != CV_UNKNOWN)
1447 {
1448 config.cpu.vendor
1449 = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1450 config.cpu.family = btrace->config.cpu.family;
1451 config.cpu.model = btrace->config.cpu.model;
1452 config.cpu.stepping = btrace->config.cpu.stepping;
1453
1454 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1455 if (errcode < 0)
1456 error (_("Failed to configure the Intel Processor Trace "
1457 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
1458 }
1459
1460 decoder = pt_insn_alloc_decoder (&config);
1461 if (decoder == NULL)
1462 error (_("Failed to allocate the Intel Processor Trace decoder."));
1463
1464 try
1465 {
1466 struct pt_image *image;
1467
1468 image = pt_insn_get_image (decoder);
1469 if (image == NULL)
1470 error (_("Failed to configure the Intel Processor Trace decoder."));
1471
1472 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1473 if (errcode < 0)
1474 error (_("Failed to configure the Intel Processor Trace decoder: "
1475 "%s."), pt_errstr (pt_errcode (errcode)));
1476
1477 ftrace_add_pt (btinfo, decoder, &level, gaps);
1478 }
1479 catch (const gdb_exception &error)
1480 {
1481 /* Indicate a gap in the trace if we quit trace processing. */
1482 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1483 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1484
1485 btrace_finalize_ftrace_pt (decoder, tp, level);
1486
1487 throw;
1488 }
1489
1490 btrace_finalize_ftrace_pt (decoder, tp, level);
1491 }
1492
1493 #else /* defined (HAVE_LIBIPT) */
1494
1495 static void
1496 btrace_compute_ftrace_pt (struct thread_info *tp,
1497 const struct btrace_data_pt *btrace,
1498 std::vector<unsigned int> &gaps)
1499 {
1500 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1501 }
1502
1503 #endif /* defined (HAVE_LIBIPT) */
1504
1505 /* Compute the function branch trace from a block branch trace BTRACE for
1506 a thread given by TP. If CPU is not NULL, overwrite the cpu in the
1507 branch trace configuration. This is currently only used for the PT
1508 format. */
1509
1510 static void
1511 btrace_compute_ftrace_1 (struct thread_info *tp,
1512 struct btrace_data *btrace,
1513 const struct btrace_cpu *cpu,
1514 std::vector<unsigned int> &gaps)
1515 {
1516 DEBUG ("compute ftrace");
1517
1518 switch (btrace->format)
1519 {
1520 case BTRACE_FORMAT_NONE:
1521 return;
1522
1523 case BTRACE_FORMAT_BTS:
1524 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1525 return;
1526
1527 case BTRACE_FORMAT_PT:
1528 /* Overwrite the cpu we use for enabling errata workarounds. */
1529 if (cpu != nullptr)
1530 btrace->variant.pt.config.cpu = *cpu;
1531
1532 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1533 return;
1534 }
1535
1536 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1537 }
1538
1539 static void
1540 btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1541 {
1542 if (!gaps.empty ())
1543 {
1544 tp->btrace.ngaps += gaps.size ();
1545 btrace_bridge_gaps (tp, gaps);
1546 }
1547 }
1548
1549 static void
1550 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
1551 const struct btrace_cpu *cpu)
1552 {
1553 std::vector<unsigned int> gaps;
1554
1555 try
1556 {
1557 btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
1558 }
1559 catch (const gdb_exception &error)
1560 {
1561 btrace_finalize_ftrace (tp, gaps);
1562
1563 throw;
1564 }
1565
1566 btrace_finalize_ftrace (tp, gaps);
1567 }
1568
1569 /* Add an entry for the current PC. */
1570
1571 static void
1572 btrace_add_pc (struct thread_info *tp)
1573 {
1574 struct btrace_data btrace;
1575 struct regcache *regcache;
1576 CORE_ADDR pc;
1577
1578 regcache = get_thread_regcache (tp);
1579 pc = regcache_read_pc (regcache);
1580
1581 btrace.format = BTRACE_FORMAT_BTS;
1582 btrace.variant.bts.blocks = new std::vector<btrace_block>;
1583
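/* A block with begin == end == PC decodes to exactly this one
   instruction; see btrace_compute_ftrace_bts.  */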
1584 btrace.variant.bts.blocks->emplace_back (pc, pc);
1585
1586 btrace_compute_ftrace (tp, &btrace, NULL);
1587 }
1588
1589 /* See btrace.h. */
1590
1591 void
1592 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1593 {
1594 if (tp->btrace.target != NULL)
1595 return;
1596
1597 #if !defined (HAVE_LIBIPT)
1598 if (conf->format == BTRACE_FORMAT_PT)
1599 error (_("Intel Processor Trace support was disabled at compile time."));
1600 #endif /* !defined (HAVE_LIBIPT) */
1601
1602 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1603 target_pid_to_str (tp->ptid).c_str ());
1604
1605 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1606
1607 /* We're done if we failed to enable tracing. */
1608 if (tp->btrace.target == NULL)
1609 return;
1610
1611 /* We need to undo the enable in case of errors. */
1612 try
1613 {
1614 /* Add an entry for the current PC so we start tracing from where we
1615 enabled it.
1616
1617 If we can't access TP's registers, TP is most likely running. In this
1618 case, we can't really say where tracing was enabled so it should be
1619 safe to simply skip this step.
1620
1621 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1622 start at the PC at which tracing was enabled. */
1623 if (conf->format != BTRACE_FORMAT_PT
1624 && can_access_registers_thread (tp))
1625 btrace_add_pc (tp);
1626 }
1627 catch (const gdb_exception &exception)
1628 {
1629 btrace_disable (tp);
1630
1631 throw;
1632 }
1633 }
1634
1635 /* See btrace.h. */
1636
1637 const struct btrace_config *
1638 btrace_conf (const struct btrace_thread_info *btinfo)
1639 {
1640 if (btinfo->target == NULL)
1641 return NULL;
1642
1643 return target_btrace_conf (btinfo->target);
1644 }
1645
1646 /* See btrace.h. */
1647
1648 void
1649 btrace_disable (struct thread_info *tp)
1650 {
1651 struct btrace_thread_info *btp = &tp->btrace;
1652
1653 if (btp->target == NULL)
1654 return;
1655
1656 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1657 target_pid_to_str (tp->ptid).c_str ());
1658
1659 target_disable_btrace (btp->target);
1660 btp->target = NULL;
1661
1662 btrace_clear (tp);
1663 }
1664
1665 /* See btrace.h. */
1666
1667 void
1668 btrace_teardown (struct thread_info *tp)
1669 {
1670 struct btrace_thread_info *btp = &tp->btrace;
1671
1672 if (btp->target == NULL)
1673 return;
1674
1675 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1676 target_pid_to_str (tp->ptid).c_str ());
1677
1678 target_teardown_btrace (btp->target);
1679 btp->target = NULL;
1680
1681 btrace_clear (tp);
1682 }
1683
1684 /* Stitch branch trace in BTS format. */
1685
1686 static int
1687 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1688 {
1689 struct btrace_thread_info *btinfo;
1690 struct btrace_function *last_bfun;
1691 btrace_block *first_new_block;
1692
1693 btinfo = &tp->btrace;
1694 gdb_assert (!btinfo->functions.empty ());
1695 gdb_assert (!btrace->blocks->empty ());
1696
1697 last_bfun = &btinfo->functions.back ();
1698
1699 /* If the existing trace ends with a gap, we just glue the traces
1700 together. We need to drop the last (i.e. chronologically first) block
1701 of the new trace, though, since we can't fill in the start address. */
1702 if (last_bfun->insn.empty ())
1703 {
1704 btrace->blocks->pop_back ();
1705 return 0;
1706 }
1707
1708 /* Beware that block trace starts with the most recent block, so the
1709 chronologically first block in the new trace is the last block in
1710 the new trace's block vector. */
1711 first_new_block = &btrace->blocks->back ();
1712 const btrace_insn &last_insn = last_bfun->insn.back ();
1713
1714 /* If the current PC at the end of the block is the same as in our current
1715 trace, there are two explanations:
1716 1. we executed the instruction and some branch brought us back.
1717 2. we have not made any progress.
1718 In the first case, the delta trace vector should contain at least two
1719 entries.
1720 In the second case, the delta trace vector should contain exactly one
1721 entry for the partial block containing the current PC. Remove it. */
1722 if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
1723 {
1724 btrace->blocks->pop_back ();
1725 return 0;
1726 }
1727
1728 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
1729 core_addr_to_string_nz (first_new_block->end));
1730
1731 /* Do a simple sanity check to make sure we don't accidentally end up
1732 with a bad block. This should not occur in practice. */
1733 if (first_new_block->end < last_insn.pc)
1734 {
1735 warning (_("Error while trying to read delta trace. Falling back to "
1736 "a full read."));
1737 return -1;
1738 }
1739
1740 /* We adjust the last block to start at the end of our current trace. */
1741 gdb_assert (first_new_block->begin == 0);
1742 first_new_block->begin = last_insn.pc;
1743
1744 /* We simply pop the last insn so we can insert it again as part of
1745 the normal branch trace computation.
1746 Since instruction iterators are based on indices in the instructions
1747 vector, we don't leave any pointers dangling. */
1748 DEBUG ("pruning insn at %s for stitching",
1749 ftrace_print_insn_addr (&last_insn));
1750
1751 last_bfun->insn.pop_back ();
1752
1753 /* The instructions vector may become empty temporarily if this has
1754 been the only instruction in this function segment.
1755 This violates the invariant but will be remedied shortly by
1756 btrace_compute_ftrace when we add the new trace. */
1757
1758 /* The only case where this would hurt is if the entire trace consisted
1759 of just that one instruction. If we remove it, we might turn the now
1760 empty btrace function segment into a gap. But we don't want gaps at
1761 the beginning. To avoid this, we remove the entire old trace. */
1762 if (last_bfun->number == 1 && last_bfun->insn.empty ())
1763 btrace_clear (tp);
1764
1765 return 0;
1766 }
1767
1768 /* Adjust the block trace in order to stitch old and new trace together.
1769 BTRACE is the new delta trace between the last and the current stop.
1770 TP is the traced thread.
1771 May modify BTRACE as well as the existing trace in TP.
1772 Return 0 on success, -1 otherwise. */
1773
1774 static int
1775 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1776 {
1777 /* If we don't have trace, there's nothing to do. */
1778 if (btrace->empty ())
1779 return 0;
1780
1781 switch (btrace->format)
1782 {
1783 case BTRACE_FORMAT_NONE:
1784 return 0;
1785
1786 case BTRACE_FORMAT_BTS:
1787 return btrace_stitch_bts (&btrace->variant.bts, tp);
1788
1789 case BTRACE_FORMAT_PT:
1790 /* Delta reads are not supported. */
1791 return -1;
1792 }
1793
1794 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1795 }
1796
1797 /* Clear the branch trace histories in BTINFO. */
1798
1799 static void
1800 btrace_clear_history (struct btrace_thread_info *btinfo)
1801 {
1802 xfree (btinfo->insn_history);
1803 xfree (btinfo->call_history);
1804 xfree (btinfo->replay);
1805
1806 btinfo->insn_history = NULL;
1807 btinfo->call_history = NULL;
1808 btinfo->replay = NULL;
1809 }
1810
1811 /* Clear the branch trace maintenance histories in BTINFO. */
1812
1813 static void
1814 btrace_maint_clear (struct btrace_thread_info *btinfo)
1815 {
1816 switch (btinfo->data.format)
1817 {
1818 default:
1819 break;
1820
1821 case BTRACE_FORMAT_BTS:
1822 btinfo->maint.variant.bts.packet_history.begin = 0;
1823 btinfo->maint.variant.bts.packet_history.end = 0;
1824 break;
1825
1826 #if defined (HAVE_LIBIPT)
1827 case BTRACE_FORMAT_PT:
1828 delete btinfo->maint.variant.pt.packets;
1829
1830 btinfo->maint.variant.pt.packets = NULL;
1831 btinfo->maint.variant.pt.packet_history.begin = 0;
1832 btinfo->maint.variant.pt.packet_history.end = 0;
1833 break;
1834 #endif /* defined (HAVE_LIBIPT) */
1835 }
1836 }
1837
1838 /* See btrace.h. */
1839
1840 const char *
1841 btrace_decode_error (enum btrace_format format, int errcode)
1842 {
1843 switch (format)
1844 {
1845 case BTRACE_FORMAT_BTS:
1846 switch (errcode)
1847 {
1848 case BDE_BTS_OVERFLOW:
1849 return _("instruction overflow");
1850
1851 case BDE_BTS_INSN_SIZE:
1852 return _("unknown instruction");
1853
1854 default:
1855 break;
1856 }
1857 break;
1858
1859 #if defined (HAVE_LIBIPT)
1860 case BTRACE_FORMAT_PT:
1861 switch (errcode)
1862 {
1863 case BDE_PT_USER_QUIT:
1864 return _("trace decode cancelled");
1865
1866 case BDE_PT_DISABLED:
1867 return _("disabled");
1868
1869 case BDE_PT_OVERFLOW:
1870 return _("overflow");
1871
1872 default:
1873 if (errcode < 0)
1874 return pt_errstr (pt_errcode (errcode));
1875 break;
1876 }
1877 break;
1878 #endif /* defined (HAVE_LIBIPT) */
1879
1880 default:
1881 break;
1882 }
1883
1884 return _("unknown");
1885 }
1886
1887 /* See btrace.h. */
1888
1889 void
1890 btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
1891 {
1892 struct btrace_thread_info *btinfo;
1893 struct btrace_target_info *tinfo;
1894 struct btrace_data btrace;
1895 int errcode;
1896
1897 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1898 target_pid_to_str (tp->ptid).c_str ());
1899
1900 btinfo = &tp->btrace;
1901 tinfo = btinfo->target;
1902 if (tinfo == NULL)
1903 return;
1904
1905 /* There's no way we could get new trace while replaying.
1906 On the other hand, delta trace would return a partial record with the
1907 current PC, which is the replay PC, not the last PC, as expected. */
1908 if (btinfo->replay != NULL)
1909 return;
1910
1911 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1912 can store a gdb.Record object in Python referring to a different thread
1913 than the current one, temporarily set INFERIOR_PTID. */
1914 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
1915 inferior_ptid = tp->ptid;
1916
1917 /* We should not be called on running or exited threads. */
1918 gdb_assert (can_access_registers_thread (tp));
1919
1920 /* Let's first try to extend the trace we already have. */
1921 if (!btinfo->functions.empty ())
1922 {
1923 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1924 if (errcode == 0)
1925 {
1926 /* Success. Let's try to stitch the traces together. */
1927 errcode = btrace_stitch_trace (&btrace, tp);
1928 }
1929 else
1930 {
1931 /* We failed to read delta trace. Let's try to read new trace. */
1932 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1933
1934 /* If we got any new trace, discard what we have. */
1935 if (errcode == 0 && !btrace.empty ())
1936 btrace_clear (tp);
1937 }
1938
1939 /* If we were not able to read the trace, we start over. */
1940 if (errcode != 0)
1941 {
1942 btrace_clear (tp);
1943 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1944 }
1945 }
1946 else
1947 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1948
1949 /* If we were not able to read the branch trace, signal an error. */
1950 if (errcode != 0)
1951 error (_("Failed to read branch trace."));
1952
1953 /* Compute the trace, provided we have any. */
1954 if (!btrace.empty ())
1955 {
1956 /* Store the raw trace data. The stored data will be cleared in
1957 btrace_clear, so we always append the new trace. */
1958 btrace_data_append (&btinfo->data, &btrace);
1959 btrace_maint_clear (btinfo);
1960
1961 btrace_clear_history (btinfo);
1962 btrace_compute_ftrace (tp, &btrace, cpu);
1963 }
1964 }
1965
1966 /* See btrace.h. */
1967
1968 void
1969 btrace_clear (struct thread_info *tp)
1970 {
1971 struct btrace_thread_info *btinfo;
1972
1973 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1974 target_pid_to_str (tp->ptid).c_str ());
1975
1976 /* Make sure btrace frames that may hold a pointer into the branch
1977 trace data are destroyed. */
1978 reinit_frame_cache ();
1979
1980 btinfo = &tp->btrace;
1981
1982 btinfo->functions.clear ();
1983 btinfo->ngaps = 0;
1984
1985 /* Must clear the maint data before clearing the trace data - it depends on BTINFO->DATA. */
1986 btrace_maint_clear (btinfo);
1987 btinfo->data.clear ();
1988 btrace_clear_history (btinfo);
1989 }
1990
1991 /* See btrace.h. */
1992
1993 void
1994 btrace_free_objfile (struct objfile *objfile)
1995 {
1996 DEBUG ("free objfile");
1997
1998 for (thread_info *tp : all_non_exited_threads ())
1999 btrace_clear (tp);
2000 }
2001
2002 #if defined (HAVE_LIBEXPAT)
2003
2004 /* Check the btrace document version. */
2005
2006 static void
2007 check_xml_btrace_version (struct gdb_xml_parser *parser,
2008 const struct gdb_xml_element *element,
2009 void *user_data,
2010 std::vector<gdb_xml_value> &attributes)
2011 {
2012 const char *version
2013 = (const char *) xml_find_attribute (attributes, "version")->value.get ();
2014
2015 if (strcmp (version, "1.0") != 0)
2016 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
2017 }
2018
2019 /* Parse a btrace "block" xml record. */
2020
2021 static void
2022 parse_xml_btrace_block (struct gdb_xml_parser *parser,
2023 const struct gdb_xml_element *element,
2024 void *user_data,
2025 std::vector<gdb_xml_value> &attributes)
2026 {
2027 struct btrace_data *btrace;
2028 ULONGEST *begin, *end;
2029
2030 btrace = (struct btrace_data *) user_data;
2031
2032 switch (btrace->format)
2033 {
2034 case BTRACE_FORMAT_BTS:
2035 break;
2036
2037 case BTRACE_FORMAT_NONE:
2038 btrace->format = BTRACE_FORMAT_BTS;
2039 btrace->variant.bts.blocks = new std::vector<btrace_block>;
2040 break;
2041
2042 default:
2043 gdb_xml_error (parser, _("Btrace format error."));
2044 }
2045
2046 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
2047 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();
2048 btrace->variant.bts.blocks->emplace_back (*begin, *end);
2049 }
2050
2051 /* Parse a "raw" xml record. */
2052
2053 static void
2054 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
2055 gdb_byte **pdata, size_t *psize)
2056 {
2057 gdb_byte *bin;
2058 size_t len, size;
2059
2060 len = strlen (body_text);
2061 if (len % 2 != 0)
2062 gdb_xml_error (parser, _("Bad raw data size."));
2063
2064 size = len / 2;
2065
2066 gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
2067 bin = data.get ();
2068
2069 /* We use hex encoding - see gdbsupport/rsp-low.h. */
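/* For example, the body text "0fae" decodes into the two bytes 0x0f 0xae.  */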
2070 while (len > 0)
2071 {
2072 char hi, lo;
2073
2074 hi = *body_text++;
2075 lo = *body_text++;
2076
2077 if (hi == 0 || lo == 0)
2078 gdb_xml_error (parser, _("Bad hex encoding."));
2079
2080 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2081 len -= 2;
2082 }
2083
2084 *pdata = data.release ();
2085 *psize = size;
2086 }
2087
2088 /* Parse a btrace pt-config "cpu" xml record. */
2089
2090 static void
2091 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2092 const struct gdb_xml_element *element,
2093 void *user_data,
2094 std::vector<gdb_xml_value> &attributes)
2095 {
2096 struct btrace_data *btrace;
2097 const char *vendor;
2098 ULONGEST *family, *model, *stepping;
2099
2100 vendor =
2101 (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
2102 family
2103 = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
2104 model
2105 = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
2106 stepping
2107 = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();
2108
2109 btrace = (struct btrace_data *) user_data;
2110
2111 if (strcmp (vendor, "GenuineIntel") == 0)
2112 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2113
2114 btrace->variant.pt.config.cpu.family = *family;
2115 btrace->variant.pt.config.cpu.model = *model;
2116 btrace->variant.pt.config.cpu.stepping = *stepping;
2117 }
2118
2119 /* Parse a btrace pt "raw" xml record. */
2120
2121 static void
2122 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2123 const struct gdb_xml_element *element,
2124 void *user_data, const char *body_text)
2125 {
2126 struct btrace_data *btrace;
2127
2128 btrace = (struct btrace_data *) user_data;
2129 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2130 &btrace->variant.pt.size);
2131 }
2132
2133 /* Parse a btrace "pt" xml record. */
2134
2135 static void
2136 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2137 const struct gdb_xml_element *element,
2138 void *user_data,
2139 std::vector<gdb_xml_value> &attributes)
2140 {
2141 struct btrace_data *btrace;
2142
2143 btrace = (struct btrace_data *) user_data;
2144 btrace->format = BTRACE_FORMAT_PT;
2145 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2146 btrace->variant.pt.data = NULL;
2147 btrace->variant.pt.size = 0;
2148 }
2149
2150 static const struct gdb_xml_attribute block_attributes[] = {
2151 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2152 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2153 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2154 };
2155
2156 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2157 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2158 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2159 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2160 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2161 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2162 };
2163
2164 static const struct gdb_xml_element btrace_pt_config_children[] = {
2165 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2166 parse_xml_btrace_pt_config_cpu, NULL },
2167 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2168 };
2169
2170 static const struct gdb_xml_element btrace_pt_children[] = {
2171 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2172 NULL },
2173 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2174 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2175 };
2176
2177 static const struct gdb_xml_attribute btrace_attributes[] = {
2178 { "version", GDB_XML_AF_NONE, NULL, NULL },
2179 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2180 };
2181
2182 static const struct gdb_xml_element btrace_children[] = {
2183 { "block", block_attributes, NULL,
2184 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2185 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2186 NULL },
2187 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2188 };
2189
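/* Roughly, the accepted document shape is
     <btrace version="1.0">
       <block begin="..." end="..."/>...                        (BTS)
       <pt> <pt-config> <cpu .../> </pt-config> <raw>...</raw> </pt>   (Intel PT)
     </btrace>  */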
2190 static const struct gdb_xml_element btrace_elements[] = {
2191 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2192 check_xml_btrace_version, NULL },
2193 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2194 };
2195
2196 #endif /* defined (HAVE_LIBEXPAT) */
2197
2198 /* See btrace.h. */
2199
2200 void
2201 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2202 {
2203 #if defined (HAVE_LIBEXPAT)
2204
2205 int errcode;
2206 btrace_data result;
2207 result.format = BTRACE_FORMAT_NONE;
2208
2209 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2210 buffer, &result);
2211 if (errcode != 0)
2212 error (_("Error parsing branch trace."));
2213
2214 /* Keep parse results. */
2215 *btrace = std::move (result);
2216
2217 #else /* !defined (HAVE_LIBEXPAT) */
2218
2219 error (_("Cannot process branch trace. XML support was disabled at "
2220 "compile time."));
2221
2222 #endif /* !defined (HAVE_LIBEXPAT) */
2223 }
2224
2225 #if defined (HAVE_LIBEXPAT)
2226
2227 /* Parse a btrace-conf "bts" xml record. */
2228
2229 static void
2230 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2231 const struct gdb_xml_element *element,
2232 void *user_data,
2233 std::vector<gdb_xml_value> &attributes)
2234 {
2235 struct btrace_config *conf;
2236 struct gdb_xml_value *size;
2237
2238 conf = (struct btrace_config *) user_data;
2239 conf->format = BTRACE_FORMAT_BTS;
2240 conf->bts.size = 0;
2241
2242 size = xml_find_attribute (attributes, "size");
2243 if (size != NULL)
2244 conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
2245 }
2246
2247 /* Parse a btrace-conf "pt" xml record. */
2248
2249 static void
2250 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2251 const struct gdb_xml_element *element,
2252 void *user_data,
2253 std::vector<gdb_xml_value> &attributes)
2254 {
2255 struct btrace_config *conf;
2256 struct gdb_xml_value *size;
2257
2258 conf = (struct btrace_config *) user_data;
2259 conf->format = BTRACE_FORMAT_PT;
2260 conf->pt.size = 0;
2261
2262 size = xml_find_attribute (attributes, "size");
2263 if (size != NULL)
2264 conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
2265 }
2266
2267 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2268 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2269 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2270 };
2271
2272 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2273 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2274 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2275 };
2276
2277 static const struct gdb_xml_element btrace_conf_children[] = {
2278 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2279 parse_xml_btrace_conf_bts, NULL },
2280 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2281 parse_xml_btrace_conf_pt, NULL },
2282 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2283 };
2284
2285 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2286 { "version", GDB_XML_AF_NONE, NULL, NULL },
2287 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2288 };
2289
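/* Roughly: <btrace-conf version="1.0"> <bts size="..."/> <pt size="..."/> </btrace-conf>.  */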
2290 static const struct gdb_xml_element btrace_conf_elements[] = {
2291 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2292 GDB_XML_EF_NONE, NULL, NULL },
2293 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2294 };
2295
2296 #endif /* defined (HAVE_LIBEXPAT) */
2297
2298 /* See btrace.h. */
2299
2300 void
2301 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2302 {
2303 #if defined (HAVE_LIBEXPAT)
2304
2305 int errcode;
2306 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2307 btrace_conf_elements, xml, conf);
2308 if (errcode != 0)
2309 error (_("Error parsing branch trace configuration."));
2310
2311 #else /* !defined (HAVE_LIBEXPAT) */
2312
2313 error (_("Cannot process the branch trace configuration. XML support "
2314 "was disabled at compile time."));
2315
2316 #endif /* !defined (HAVE_LIBEXPAT) */
2317 }
2318
2319 /* See btrace.h. */
2320
2321 const struct btrace_insn *
2322 btrace_insn_get (const struct btrace_insn_iterator *it)
2323 {
2324 const struct btrace_function *bfun;
2325 unsigned int index, end;
2326
2327 index = it->insn_index;
2328 bfun = &it->btinfo->functions[it->call_index];
2329
2330 /* Check if the iterator points to a gap in the trace. */
2331 if (bfun->errcode != 0)
2332 return NULL;
2333
2334 /* The index is within the bounds of this function's instruction vector. */
2335 end = bfun->insn.size ();
2336 gdb_assert (0 < end);
2337 gdb_assert (index < end);
2338
2339 return &bfun->insn[index];
2340 }
2341
2342 /* See btrace.h. */
2343
2344 int
2345 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2346 {
2347 return it->btinfo->functions[it->call_index].errcode;
2348 }
2349
2350 /* See btrace.h. */
2351
2352 unsigned int
2353 btrace_insn_number (const struct btrace_insn_iterator *it)
2354 {
2355 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
2356 }
2357
2358 /* See btrace.h. */
2359
2360 void
2361 btrace_insn_begin (struct btrace_insn_iterator *it,
2362 const struct btrace_thread_info *btinfo)
2363 {
2364 if (btinfo->functions.empty ())
2365 error (_("No trace."));
2366
2367 it->btinfo = btinfo;
2368 it->call_index = 0;
2369 it->insn_index = 0;
2370 }
2371
2372 /* See btrace.h. */
2373
2374 void
2375 btrace_insn_end (struct btrace_insn_iterator *it,
2376 const struct btrace_thread_info *btinfo)
2377 {
2378 const struct btrace_function *bfun;
2379 unsigned int length;
2380
2381 if (btinfo->functions.empty ())
2382 error (_("No trace."));
2383
2384 bfun = &btinfo->functions.back ();
2385 length = bfun->insn.size ();
2386
2387 /* The last function may either be a gap or contain the current
2388 instruction, which is one past the end of the execution trace; ignore
2389 it. */
2390 if (length > 0)
2391 length -= 1;
2392
2393 it->btinfo = btinfo;
2394 it->call_index = bfun->number - 1;
2395 it->insn_index = length;
2396 }
2397
2398 /* See btrace.h. */
2399
2400 unsigned int
2401 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2402 {
2403 const struct btrace_function *bfun;
2404 unsigned int index, steps;
2405
2406 bfun = &it->btinfo->functions[it->call_index];
2407 steps = 0;
2408 index = it->insn_index;
2409
2410 while (stride != 0)
2411 {
2412 unsigned int end, space, adv;
2413
2414 end = bfun->insn.size ();
2415
2416 /* An empty function segment represents a gap in the trace. We count
2417 it as one instruction. */
2418 if (end == 0)
2419 {
2420 const struct btrace_function *next;
2421
2422 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2423 if (next == NULL)
2424 break;
2425
2426 stride -= 1;
2427 steps += 1;
2428
2429 bfun = next;
2430 index = 0;
2431
2432 continue;
2433 }
2434
2435 gdb_assert (0 < end);
2436 gdb_assert (index < end);
2437
2438 /* Compute the number of instructions remaining in this segment. */
2439 space = end - index;
2440
2441 /* Advance the iterator as far as possible within this segment. */
2442 adv = std::min (space, stride);
2443 stride -= adv;
2444 index += adv;
2445 steps += adv;
2446
2447 /* Move to the next function if we're at the end of this one. */
2448 if (index == end)
2449 {
2450 const struct btrace_function *next;
2451
2452 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2453 if (next == NULL)
2454 {
2455 /* We stepped past the last function.
2456
2457 Let's adjust the index to point to the last instruction in
2458 the previous function. */
2459 index -= 1;
2460 steps -= 1;
2461 break;
2462 }
2463
2464 /* We now point to the first instruction in the new function. */
2465 bfun = next;
2466 index = 0;
2467 }
2468
2469 /* We did make progress. */
2470 gdb_assert (adv > 0);
2471 }
2472
2473 /* Update the iterator. */
2474 it->call_index = bfun->number - 1;
2475 it->insn_index = index;
2476
2477 return steps;
2478 }
2479
2480 /* See btrace.h. */
2481
2482 unsigned int
2483 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2484 {
2485 const struct btrace_function *bfun;
2486 unsigned int index, steps;
2487
2488 bfun = &it->btinfo->functions[it->call_index];
2489 steps = 0;
2490 index = it->insn_index;
2491
2492 while (stride != 0)
2493 {
2494 unsigned int adv;
2495
2496 /* Move to the previous function if we're at the start of this one. */
2497 if (index == 0)
2498 {
2499 const struct btrace_function *prev;
2500
2501 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2502 if (prev == NULL)
2503 break;
2504
2505 /* We point to one after the last instruction in the new function. */
2506 bfun = prev;
2507 index = bfun->insn.size ();
2508
2509 /* An empty function segment represents a gap in the trace. We count
2510 it as one instruction. */
2511 if (index == 0)
2512 {
2513 stride -= 1;
2514 steps += 1;
2515
2516 continue;
2517 }
2518 }
2519
2520 /* Advance the iterator as far as possible within this segment. */
2521 adv = std::min (index, stride);
2522
2523 stride -= adv;
2524 index -= adv;
2525 steps += adv;
2526
2527 /* We did make progress. */
2528 gdb_assert (adv > 0);
2529 }
2530
2531 /* Update the iterator. */
2532 it->call_index = bfun->number - 1;
2533 it->insn_index = index;
2534
2535 return steps;
2536 }
2537
2538 /* See btrace.h. */
2539
2540 int
2541 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2542 const struct btrace_insn_iterator *rhs)
2543 {
2544 gdb_assert (lhs->btinfo == rhs->btinfo);
2545
2546 if (lhs->call_index != rhs->call_index)
2547 return lhs->call_index - rhs->call_index;
2548
2549 return lhs->insn_index - rhs->insn_index;
2550 }
2551
2552 /* See btrace.h. */
2553
2554 int
2555 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2556 const struct btrace_thread_info *btinfo,
2557 unsigned int number)
2558 {
2559 const struct btrace_function *bfun;
2560 unsigned int upper, lower;
2561
2562 if (btinfo->functions.empty ())
2563 return 0;
2564
2565 lower = 0;
2566 bfun = &btinfo->functions[lower];
2567 if (number < bfun->insn_offset)
2568 return 0;
2569
2570 upper = btinfo->functions.size () - 1;
2571 bfun = &btinfo->functions[upper];
2572 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2573 return 0;
2574
2575 /* We assume that there are no holes in the numbering. */
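/* Do a binary search over the function segments, keyed by each segment's
   instruction range [insn_offset, insn_offset + ftrace_call_num_insn).  */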
2576 for (;;)
2577 {
2578 const unsigned int average = lower + (upper - lower) / 2;
2579
2580 bfun = &btinfo->functions[average];
2581
2582 if (number < bfun->insn_offset)
2583 {
2584 upper = average - 1;
2585 continue;
2586 }
2587
2588 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2589 {
2590 lower = average + 1;
2591 continue;
2592 }
2593
2594 break;
2595 }
2596
2597 it->btinfo = btinfo;
2598 it->call_index = bfun->number - 1;
2599 it->insn_index = number - bfun->insn_offset;
2600 return 1;
2601 }
2602
2603 /* Returns true if the recording ends with a function segment that
2604 contains only a single (i.e. the current) instruction. */
2605
2606 static bool
2607 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2608 {
2609 const btrace_function *bfun;
2610
2611 if (btinfo->functions.empty ())
2612 return false;
2613
2614 bfun = &btinfo->functions.back ();
2615 if (bfun->errcode != 0)
2616 return false;
2617
2618 return ftrace_call_num_insn (bfun) == 1;
2619 }
2620
2621 /* See btrace.h. */
2622
2623 const struct btrace_function *
2624 btrace_call_get (const struct btrace_call_iterator *it)
2625 {
2626 if (it->index >= it->btinfo->functions.size ())
2627 return NULL;
2628
2629 return &it->btinfo->functions[it->index];
2630 }
2631
2632 /* See btrace.h. */
2633
2634 unsigned int
2635 btrace_call_number (const struct btrace_call_iterator *it)
2636 {
2637 const unsigned int length = it->btinfo->functions.size ();
2638
2639 /* If the last function segment contains only a single instruction (i.e. the
2640 current instruction), skip it. */
2641 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2642 return length;
2643
2644 return it->index + 1;
2645 }
2646
2647 /* See btrace.h. */
2648
2649 void
2650 btrace_call_begin (struct btrace_call_iterator *it,
2651 const struct btrace_thread_info *btinfo)
2652 {
2653 if (btinfo->functions.empty ())
2654 error (_("No trace."));
2655
2656 it->btinfo = btinfo;
2657 it->index = 0;
2658 }
2659
2660 /* See btrace.h. */
2661
2662 void
2663 btrace_call_end (struct btrace_call_iterator *it,
2664 const struct btrace_thread_info *btinfo)
2665 {
2666 if (btinfo->functions.empty ())
2667 error (_("No trace."));
2668
2669 it->btinfo = btinfo;
2670 it->index = btinfo->functions.size ();
2671 }
2672
2673 /* See btrace.h. */
2674
2675 unsigned int
2676 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2677 {
2678 const unsigned int length = it->btinfo->functions.size ();
2679
2680 if (it->index + stride < length - 1)
2681 /* Default case: Simply advance the iterator. */
2682 it->index += stride;
2683 else if (it->index + stride == length - 1)
2684 {
2685 /* We land exactly at the last function segment. If it contains only one
2686 instruction (i.e. the current instruction) it is not actually part of
2687 the trace. */
2688 if (btrace_ends_with_single_insn (it->btinfo))
2689 it->index = length;
2690 else
2691 it->index = length - 1;
2692 }
2693 else
2694 {
2695 /* We land past the last function segment and have to adjust the stride.
2696 If the last function segment contains only one instruction (i.e. the
2697 current instruction) it is not actually part of the trace. */
2698 if (btrace_ends_with_single_insn (it->btinfo))
2699 stride = length - it->index - 1;
2700 else
2701 stride = length - it->index;
2702
2703 it->index = length;
2704 }
2705
2706 return stride;
2707 }
2708
2709 /* See btrace.h. */
2710
2711 unsigned int
2712 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2713 {
2714 const unsigned int length = it->btinfo->functions.size ();
2715 int steps = 0;
2716
2717 gdb_assert (it->index <= length);
2718
2719 if (stride == 0 || it->index == 0)
2720 return 0;
2721
2722 /* If we are at the end, the first step is a special case. If the last
2723 function segment contains only one instruction (i.e. the current
2724 instruction) it is not actually part of the trace. To be able to step
2725 over this instruction, we need at least one more function segment. */
2726 if ((it->index == length) && (length > 1))
2727 {
2728 if (btrace_ends_with_single_insn (it->btinfo))
2729 it->index = length - 2;
2730 else
2731 it->index = length - 1;
2732
2733 steps = 1;
2734 stride -= 1;
2735 }
2736
2737 stride = std::min (stride, it->index);
2738
2739 it->index -= stride;
2740 return steps + stride;
2741 }
2742
2743 /* See btrace.h. */
2744
2745 int
2746 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2747 const struct btrace_call_iterator *rhs)
2748 {
2749 gdb_assert (lhs->btinfo == rhs->btinfo);
2750 return (int) (lhs->index - rhs->index);
2751 }
2752
2753 /* See btrace.h. */
2754
2755 int
2756 btrace_find_call_by_number (struct btrace_call_iterator *it,
2757 const struct btrace_thread_info *btinfo,
2758 unsigned int number)
2759 {
2760 const unsigned int length = btinfo->functions.size ();
2761
2762 if ((number == 0) || (number > length))
2763 return 0;
2764
2765 it->btinfo = btinfo;
2766 it->index = number - 1;
2767 return 1;
2768 }
2769
2770 /* See btrace.h. */
2771
2772 void
2773 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2774 const struct btrace_insn_iterator *begin,
2775 const struct btrace_insn_iterator *end)
2776 {
2777 if (btinfo->insn_history == NULL)
2778 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2779
2780 btinfo->insn_history->begin = *begin;
2781 btinfo->insn_history->end = *end;
2782 }
2783
2784 /* See btrace.h. */
2785
2786 void
2787 btrace_set_call_history (struct btrace_thread_info *btinfo,
2788 const struct btrace_call_iterator *begin,
2789 const struct btrace_call_iterator *end)
2790 {
2791 gdb_assert (begin->btinfo == end->btinfo);
2792
2793 if (btinfo->call_history == NULL)
2794 btinfo->call_history = XCNEW (struct btrace_call_history);
2795
2796 btinfo->call_history->begin = *begin;
2797 btinfo->call_history->end = *end;
2798 }
2799
2800 /* See btrace.h. */
2801
2802 int
2803 btrace_is_replaying (struct thread_info *tp)
2804 {
2805 return tp->btrace.replay != NULL;
2806 }
2807
2808 /* See btrace.h. */
2809
2810 int
2811 btrace_is_empty (struct thread_info *tp)
2812 {
2813 struct btrace_insn_iterator begin, end;
2814 struct btrace_thread_info *btinfo;
2815
2816 btinfo = &tp->btrace;
2817
2818 if (btinfo->functions.empty ())
2819 return 1;
2820
2821 btrace_insn_begin (&begin, btinfo);
2822 btrace_insn_end (&end, btinfo);
2823
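/* The end iterator points at the current instruction, which is one past the
   recorded trace; if it equals the begin iterator, the trace contains nothing
   besides the current instruction.  */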
2824 return btrace_insn_cmp (&begin, &end) == 0;
2825 }
2826
2827 #if defined (HAVE_LIBIPT)
2828
2829 /* Print a single packet. */
2830
2831 static void
2832 pt_print_packet (const struct pt_packet *packet)
2833 {
2834 switch (packet->type)
2835 {
2836 default:
2837 printf_unfiltered (("[??: %x]"), packet->type);
2838 break;
2839
2840 case ppt_psb:
2841 printf_unfiltered (("psb"));
2842 break;
2843
2844 case ppt_psbend:
2845 printf_unfiltered (("psbend"));
2846 break;
2847
2848 case ppt_pad:
2849 printf_unfiltered (("pad"));
2850 break;
2851
2852 case ppt_tip:
2853 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2854 packet->payload.ip.ipc,
2855 packet->payload.ip.ip);
2856 break;
2857
2858 case ppt_tip_pge:
2859 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2860 packet->payload.ip.ipc,
2861 packet->payload.ip.ip);
2862 break;
2863
2864 case ppt_tip_pgd:
2865 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2866 packet->payload.ip.ipc,
2867 packet->payload.ip.ip);
2868 break;
2869
2870 case ppt_fup:
2871 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2872 packet->payload.ip.ipc,
2873 packet->payload.ip.ip);
2874 break;
2875
2876 case ppt_tnt_8:
2877 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2878 packet->payload.tnt.bit_size,
2879 packet->payload.tnt.payload);
2880 break;
2881
2882 case ppt_tnt_64:
2883 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2884 packet->payload.tnt.bit_size,
2885 packet->payload.tnt.payload);
2886 break;
2887
2888 case ppt_pip:
2889 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2890 packet->payload.pip.nr ? (" nr") : (""));
2891 break;
2892
2893 case ppt_tsc:
2894 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2895 break;
2896
2897 case ppt_cbr:
2898 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2899 break;
2900
2901 case ppt_mode:
2902 switch (packet->payload.mode.leaf)
2903 {
2904 default:
2905 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2906 break;
2907
2908 case pt_mol_exec:
2909 printf_unfiltered (("mode.exec%s%s"),
2910 packet->payload.mode.bits.exec.csl
2911 ? (" cs.l") : (""),
2912 packet->payload.mode.bits.exec.csd
2913 ? (" cs.d") : (""));
2914 break;
2915
2916 case pt_mol_tsx:
2917 printf_unfiltered (("mode.tsx%s%s"),
2918 packet->payload.mode.bits.tsx.intx
2919 ? (" intx") : (""),
2920 packet->payload.mode.bits.tsx.abrt
2921 ? (" abrt") : (""));
2922 break;
2923 }
2924 break;
2925
2926 case ppt_ovf:
2927 printf_unfiltered (("ovf"));
2928 break;
2929
2930 case ppt_stop:
2931 printf_unfiltered (("stop"));
2932 break;
2933
2934 case ppt_vmcs:
2935 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2936 break;
2937
2938 case ppt_tma:
2939 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2940 packet->payload.tma.fc);
2941 break;
2942
2943 case ppt_mtc:
2944 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2945 break;
2946
2947 case ppt_cyc:
2948 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2949 break;
2950
2951 case ppt_mnt:
2952 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2953 break;
2954 }
2955 }
2956
2957 /* Decode packets into MAINT using DECODER. */
2958
2959 static void
2960 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2961 struct pt_packet_decoder *decoder)
2962 {
2963 int errcode;
2964
2965 if (maint->variant.pt.packets == NULL)
2966 maint->variant.pt.packets = new std::vector<btrace_pt_packet>;
2967
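/* Repeatedly synchronize onto the trace stream and decode packets until the
   end of the trace; packet errors are recorded as error packets and decoding
   resumes at the next synchronization point.  */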
2968 for (;;)
2969 {
2970 struct btrace_pt_packet packet;
2971
2972 errcode = pt_pkt_sync_forward (decoder);
2973 if (errcode < 0)
2974 break;
2975
2976 for (;;)
2977 {
2978 pt_pkt_get_offset (decoder, &packet.offset);
2979
2980 errcode = pt_pkt_next (decoder, &packet.packet,
2981 sizeof(packet.packet));
2982 if (errcode < 0)
2983 break;
2984
2985 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2986 {
2987 packet.errcode = pt_errcode (errcode);
2988 maint->variant.pt.packets->push_back (packet);
2989 }
2990 }
2991
2992 if (errcode == -pte_eos)
2993 break;
2994
2995 packet.errcode = pt_errcode (errcode);
2996 maint->variant.pt.packets->push_back (packet);
2997
2998 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2999 packet.offset, pt_errstr (packet.errcode));
3000 }
3001
3002 if (errcode != -pte_eos)
3003 warning (_("Failed to synchronize onto the Intel Processor Trace "
3004 "stream: %s."), pt_errstr (pt_errcode (errcode)));
3005 }
3006
3007 /* Update the packet history in BTINFO. */
3008
3009 static void
3010 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
3011 {
3012 struct pt_packet_decoder *decoder;
3013 const struct btrace_cpu *cpu;
3014 struct btrace_data_pt *pt;
3015 struct pt_config config;
3016 int errcode;
3017
3018 pt = &btinfo->data.variant.pt;
3019
3020 /* Nothing to do if there is no trace. */
3021 if (pt->size == 0)
3022 return;
3023
3024 memset (&config, 0, sizeof(config));
3025
3026 config.size = sizeof (config);
3027 config.begin = pt->data;
3028 config.end = pt->data + pt->size;
3029
3030 cpu = record_btrace_get_cpu ();
3031 if (cpu == nullptr)
3032 cpu = &pt->config.cpu;
3033
3034 /* We treat an unknown vendor as 'no errata'. */
3035 if (cpu->vendor != CV_UNKNOWN)
3036 {
3037 config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
3038 config.cpu.family = cpu->family;
3039 config.cpu.model = cpu->model;
3040 config.cpu.stepping = cpu->stepping;
3041
3042 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3043 if (errcode < 0)
3044 error (_("Failed to configure the Intel Processor Trace "
3045 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
3046 }
3047
3048 decoder = pt_pkt_alloc_decoder (&config);
3049 if (decoder == NULL)
3050 error (_("Failed to allocate the Intel Processor Trace decoder."));
3051
3052 try
3053 {
3054 btrace_maint_decode_pt (&btinfo->maint, decoder);
3055 }
3056 catch (const gdb_exception &except)
3057 {
3058 pt_pkt_free_decoder (decoder);
3059
3060 if (except.reason < 0)
3061 throw;
3062 }
3063
3064 pt_pkt_free_decoder (decoder);
3065 }
3066
3067 #endif /* defined (HAVE_LIBIPT) */
3068
3069 /* Update the packet maintenance information for BTINFO and store the
3070 low and high bounds into BEGIN and END, respectively.
3071 Store the current iterator state into FROM and TO. */
3072
3073 static void
3074 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3075 unsigned int *begin, unsigned int *end,
3076 unsigned int *from, unsigned int *to)
3077 {
3078 switch (btinfo->data.format)
3079 {
3080 default:
3081 *begin = 0;
3082 *end = 0;
3083 *from = 0;
3084 *to = 0;
3085 break;
3086
3087 case BTRACE_FORMAT_BTS:
3088 /* Nothing to do - we operate directly on BTINFO->DATA. */
3089 *begin = 0;
3090 *end = btinfo->data.variant.bts.blocks->size ();
3091 *from = btinfo->maint.variant.bts.packet_history.begin;
3092 *to = btinfo->maint.variant.bts.packet_history.end;
3093 break;
3094
3095 #if defined (HAVE_LIBIPT)
3096 case BTRACE_FORMAT_PT:
3097 if (btinfo->maint.variant.pt.packets == nullptr)
3098 btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;
3099
3100 if (btinfo->maint.variant.pt.packets->empty ())
3101 btrace_maint_update_pt_packets (btinfo);
3102
3103 *begin = 0;
3104 *end = btinfo->maint.variant.pt.packets->size ();
3105 *from = btinfo->maint.variant.pt.packet_history.begin;
3106 *to = btinfo->maint.variant.pt.packet_history.end;
3107 break;
3108 #endif /* defined (HAVE_LIBIPT) */
3109 }
3110 }
3111
3112 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3113 update the current iterator position. */
3114
3115 static void
3116 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3117 unsigned int begin, unsigned int end)
3118 {
3119 switch (btinfo->data.format)
3120 {
3121 default:
3122 break;
3123
3124 case BTRACE_FORMAT_BTS:
3125 {
3126 const std::vector<btrace_block> &blocks
3127 = *btinfo->data.variant.bts.blocks;
3128 unsigned int blk;
3129
3130 for (blk = begin; blk < end; ++blk)
3131 {
3132 const btrace_block &block = blocks.at (blk);
3133
3134 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3135 core_addr_to_string_nz (block.begin),
3136 core_addr_to_string_nz (block.end));
3137 }
3138
3139 btinfo->maint.variant.bts.packet_history.begin = begin;
3140 btinfo->maint.variant.bts.packet_history.end = end;
3141 }
3142 break;
3143
3144 #if defined (HAVE_LIBIPT)
3145 case BTRACE_FORMAT_PT:
3146 {
3147 const std::vector<btrace_pt_packet> &packets
3148 = *btinfo->maint.variant.pt.packets;
3149 unsigned int pkt;
3150
3151 for (pkt = begin; pkt < end; ++pkt)
3152 {
3153 const struct btrace_pt_packet &packet = packets.at (pkt);
3154
3155 printf_unfiltered ("%u\t", pkt);
3156 printf_unfiltered ("0x%" PRIx64 "\t", packet.offset);
3157
3158 if (packet.errcode == pte_ok)
3159 pt_print_packet (&packet.packet);
3160 else
3161 printf_unfiltered ("[error: %s]", pt_errstr (packet.errcode));
3162
3163 printf_unfiltered ("\n");
3164 }
3165
3166 btinfo->maint.variant.pt.packet_history.begin = begin;
3167 btinfo->maint.variant.pt.packet_history.end = end;
3168 }
3169 break;
3170 #endif /* defined (HAVE_LIBIPT) */
3171 }
3172 }
3173
3174 /* Read a number from an argument string. */
3175
3176 static unsigned int
3177 get_uint (const char **arg)
3178 {
3179 const char *begin, *pos;
3180 char *end;
3181 unsigned long number;
3182
3183 begin = *arg;
3184 pos = skip_spaces (begin);
3185
3186 if (!isdigit (*pos))
3187 error (_("Expected positive number, got: %s."), pos);
3188
3189 number = strtoul (pos, &end, 10);
3190 if (number > UINT_MAX)
3191 error (_("Number too big."));
3192
3193 *arg += (end - begin);
3194
3195 return (unsigned int) number;
3196 }
3197
3198 /* Read a context size from an argument string. */
3199
3200 static int
3201 get_context_size (const char **arg)
3202 {
3203 const char *pos = skip_spaces (*arg);
3204
3205 if (!isdigit (*pos))
3206 error (_("Expected positive number, got: %s."), pos);
3207
3208 char *end;
3209 long result = strtol (pos, &end, 10);
3210 *arg = end;
3211 return result;
3212 }
3213
3214 /* Complain about junk at the end of an argument string. */
3215
3216 static void
3217 no_chunk (const char *arg)
3218 {
3219 if (*arg != 0)
3220 error (_("Junk after argument: %s."), arg);
3221 }
3222
3223 /* The "maintenance btrace packet-history" command. */
3224
3225 static void
3226 maint_btrace_packet_history_cmd (const char *arg, int from_tty)
3227 {
3228 struct btrace_thread_info *btinfo;
3229 unsigned int size, begin, end, from, to;
3230
3231 thread_info *tp = find_thread_ptid (inferior_ptid);
3232 if (tp == NULL)
3233 error (_("No thread."));
3234
3235 size = 10;
3236 btinfo = &tp->btrace;
3237
3238 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3239 if (begin == end)
3240 {
3241 printf_unfiltered (_("No trace.\n"));
3242 return;
3243 }
3244
3245 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3246 {
3247 from = to;
3248
3249 if (end - from < size)
3250 size = end - from;
3251 to = from + size;
3252 }
3253 else if (strcmp (arg, "-") == 0)
3254 {
3255 to = from;
3256
3257 if (to - begin < size)
3258 size = to - begin;
3259 from = to - size;
3260 }
3261 else
3262 {
3263 from = get_uint (&arg);
3264 if (end <= from)
3265 error (_("'%u' is out of range."), from);
3266
3267 arg = skip_spaces (arg);
3268 if (*arg == ',')
3269 {
3270 arg = skip_spaces (++arg);
3271
3272 if (*arg == '+')
3273 {
3274 arg += 1;
3275 size = get_context_size (&arg);
3276
3277 no_chunk (arg);
3278
3279 if (end - from < size)
3280 size = end - from;
3281 to = from + size;
3282 }
3283 else if (*arg == '-')
3284 {
3285 arg += 1;
3286 size = get_context_size (&arg);
3287
3288 no_chunk (arg);
3289
3290 /* Include the packet given as first argument. */
3291 from += 1;
3292 to = from;
3293
3294 if (to - begin < size)
3295 size = to - begin;
3296 from = to - size;
3297 }
3298 else
3299 {
3300 to = get_uint (&arg);
3301
3302 /* Include the packet at the second argument and silently
3303 truncate the range. */
3304 if (to < end)
3305 to += 1;
3306 else
3307 to = end;
3308
3309 no_chunk (arg);
3310 }
3311 }
3312 else
3313 {
3314 no_chunk (arg);
3315
3316 if (end - from < size)
3317 size = end - from;
3318 to = from + size;
3319 }
3320
3321 dont_repeat ();
3322 }
3323
3324 btrace_maint_print_packets (btinfo, from, to);
3325 }
3326
3327 /* The "maintenance btrace clear-packet-history" command. */
3328
3329 static void
3330 maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
3331 {
3332 if (args != NULL && *args != 0)
3333 error (_("Invalid argument."));
3334
3335 if (inferior_ptid == null_ptid)
3336 error (_("No thread."));
3337
3338 thread_info *tp = inferior_thread ();
3339 btrace_thread_info *btinfo = &tp->btrace;
3340
3341 /* Must clear the maint data before clearing the trace data - it depends on BTINFO->DATA. */
3342 btrace_maint_clear (btinfo);
3343 btinfo->data.clear ();
3344 }
3345
3346 /* The "maintenance btrace clear" command. */
3347
3348 static void
3349 maint_btrace_clear_cmd (const char *args, int from_tty)
3350 {
3351 if (args != NULL && *args != 0)
3352 error (_("Invalid argument."));
3353
3354 if (inferior_ptid == null_ptid)
3355 error (_("No thread."));
3356
3357 thread_info *tp = inferior_thread ();
3358 btrace_clear (tp);
3359 }
3360
3361 /* The "maintenance btrace" command. */
3362
3363 static void
3364 maint_btrace_cmd (const char *args, int from_tty)
3365 {
3366 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3367 gdb_stdout);
3368 }
3369
3370 /* The "maintenance set btrace" command. */
3371
3372 static void
3373 maint_btrace_set_cmd (const char *args, int from_tty)
3374 {
3375 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3376 gdb_stdout);
3377 }
3378
3379 /* The "maintenance show btrace" command. */
3380
3381 static void
3382 maint_btrace_show_cmd (const char *args, int from_tty)
3383 {
3384 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3385 all_commands, gdb_stdout);
3386 }
3387
3388 /* The "maintenance set btrace pt" command. */
3389
3390 static void
3391 maint_btrace_pt_set_cmd (const char *args, int from_tty)
3392 {
3393 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3394 all_commands, gdb_stdout);
3395 }
3396
3397 /* The "maintenance show btrace pt" command. */
3398
3399 static void
3400 maint_btrace_pt_show_cmd (const char *args, int from_tty)
3401 {
3402 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3403 all_commands, gdb_stdout);
3404 }
3405
3406 /* The "maintenance info btrace" command. */
3407
3408 static void
3409 maint_info_btrace_cmd (const char *args, int from_tty)
3410 {
3411 struct btrace_thread_info *btinfo;
3412 const struct btrace_config *conf;
3413
3414 if (args != NULL && *args != 0)
3415 error (_("Invalid argument."));
3416
3417 if (inferior_ptid == null_ptid)
3418 error (_("No thread."));
3419
3420 thread_info *tp = inferior_thread ();
3421
3422 btinfo = &tp->btrace;
3423
3424 conf = btrace_conf (btinfo);
3425 if (conf == NULL)
3426 error (_("No btrace configuration."));
3427
3428 printf_unfiltered (_("Format: %s.\n"),
3429 btrace_format_string (conf->format));
3430
3431 switch (conf->format)
3432 {
3433 default:
3434 break;
3435
3436 case BTRACE_FORMAT_BTS:
3437 printf_unfiltered (_("Number of packets: %zu.\n"),
3438 btinfo->data.variant.bts.blocks->size ());
3439 break;
3440
3441 #if defined (HAVE_LIBIPT)
3442 case BTRACE_FORMAT_PT:
3443 {
3444 struct pt_version version;
3445
3446 version = pt_library_version ();
3447 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3448 version.minor, version.build,
3449 version.ext != NULL ? version.ext : "");
3450
3451 btrace_maint_update_pt_packets (btinfo);
3452 printf_unfiltered (_("Number of packets: %zu.\n"),
3453 ((btinfo->maint.variant.pt.packets == nullptr)
3454 ? 0 : btinfo->maint.variant.pt.packets->size ()));
3455 }
3456 break;
3457 #endif /* defined (HAVE_LIBIPT) */
3458 }
3459 }
3460
3461 /* The "maint show btrace pt skip-pad" show value function. */
3462
3463 static void
3464 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3465 struct cmd_list_element *c,
3466 const char *value)
3467 {
3468 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3469 }
3470
3471
3472 /* Initialize btrace maintenance commands. */
3473
3474 void
3475 _initialize_btrace (void)
3476 {
3477 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3478 _("Info about branch tracing data."), &maintenanceinfolist);
3479
3480 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3481 _("Branch tracing maintenance commands."),
3482 &maint_btrace_cmdlist, "maintenance btrace ",
3483 0, &maintenancelist);
3484
3485 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3486 Set branch tracing specific variables."),
3487 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3488 0, &maintenance_set_cmdlist);
3489
3490 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3491 Set Intel Processor Trace specific variables."),
3492 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3493 0, &maint_btrace_set_cmdlist);
3494
3495 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3496 Show branch tracing specific variables."),
3497 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3498 0, &maintenance_show_cmdlist);
3499
3500 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3501 Show Intel Processor Trace specific variables."),
3502 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3503 0, &maint_btrace_show_cmdlist);
3504
3505 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3506 &maint_btrace_pt_skip_pad, _("\
3507 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3508 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3509 When enabled, PAD packets are ignored in the btrace packet history."),
3510 NULL, show_maint_btrace_pt_skip_pad,
3511 &maint_btrace_pt_set_cmdlist,
3512 &maint_btrace_pt_show_cmdlist);
3513
3514 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3515 _("Print the raw branch tracing data.\n\
3516 With no argument, print ten more packets after the previous ten-line print.\n\
3517 With '-' as argument, print ten packets before a previous ten-line print.\n\
3518 One argument specifies the starting packet of a ten-line print.\n\
3519 Two arguments with comma between specify starting and ending packets to \
3520 print.\n\
3521 Preceded with '+'/'-' the second argument specifies the distance from the \
3522 first."),
3523 &maint_btrace_cmdlist);
3524
3525 add_cmd ("clear-packet-history", class_maintenance,
3526 maint_btrace_clear_packet_history_cmd,
3527 _("Clears the branch tracing packet history.\n\
3528 Discards the raw branch tracing data but not the execution history data."),
3529 &maint_btrace_cmdlist);
3530
3531 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3532 _("Clears the branch tracing data.\n\
3533 Discards the raw branch tracing data and the execution history data.\n\
3534 The next 'record' command will fetch the branch tracing data anew."),
3535 &maint_btrace_cmdlist);
3536
3537 }