gdb/btrace.c (deliverable/binutils-gdb.git)
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 static void btrace_add_pc (struct thread_info *tp);
53
54 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
56
57 #define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
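/* For illustration: the do ... while (0) wrapper makes DEBUG expand to a
   single statement, so a use such as

     if (record_debug)
       DEBUG ("started");
     else
       ...

   parses as intended.  With a bare { ... } block instead, the ';' after
   DEBUG ("started") would terminate the if statement and orphan the
   else.  */
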
68 /* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71 static const char *
72 ftrace_print_function_name (const struct btrace_function *bfun)
73 {
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87 }
88
89 /* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_filename (const struct btrace_function *bfun)
94 {
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106 }
107
108 /* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111 static const char *
112 ftrace_print_insn_addr (const struct btrace_insn *insn)
113 {
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118 }
119
120 /* Print an ftrace debug status message. */
121
122 static void
123 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124 {
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138 }
139
140 /* Return the number of instructions in a given function call segment. */
141
142 static unsigned int
143 ftrace_call_num_insn (const struct btrace_function *bfun)
144 {
145 if (bfun == NULL)
146 return 0;
147
148 /* A gap is always counted as one instruction. */
149 if (bfun->errcode != 0)
150 return 1;
151
152 return VEC_length (btrace_insn_s, bfun->insn);
153 }
154
155 /* Return the function segment with the given NUMBER or NULL if no such segment
156 exists. BTINFO is the branch trace information for the current thread. */
157
158 static struct btrace_function *
159 ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
160 unsigned int number)
161 {
162 if (number == 0 || number > btinfo->functions.size ())
163 return NULL;
164
165 return &btinfo->functions[number - 1];
166 }
167
168 /* A const version of the function above. */
169
170 static const struct btrace_function *
171 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
172 unsigned int number)
173 {
174 if (number == 0 || number > btinfo->functions.size ())
175 return NULL;
176
177 return &btinfo->functions[number - 1];
178 }
179
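/* For illustration: function segment numbers are one-based indices into
   BTINFO->functions, so with, say, three recorded segments:

     ftrace_find_call_by_number (btinfo, 1) => &btinfo->functions[0]
     ftrace_find_call_by_number (btinfo, 3) => &btinfo->functions[2]
     ftrace_find_call_by_number (btinfo, 0) => NULL
     ftrace_find_call_by_number (btinfo, 4) => NULL  */
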
180 /* Return non-zero if BFUN does not match MFUN and FUN,
181 return zero otherwise. */
182
183 static int
184 ftrace_function_switched (const struct btrace_function *bfun,
185 const struct minimal_symbol *mfun,
186 const struct symbol *fun)
187 {
188 struct minimal_symbol *msym;
189 struct symbol *sym;
190
191 msym = bfun->msym;
192 sym = bfun->sym;
193
194 /* If the minimal symbol changed, we certainly switched functions. */
195 if (mfun != NULL && msym != NULL
196 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
197 return 1;
198
199 /* If the symbol changed, we certainly switched functions. */
200 if (fun != NULL && sym != NULL)
201 {
202 const char *bfname, *fname;
203
204 /* Check the function name. */
205 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
206 return 1;
207
208 /* Check the location of those functions, as well. */
209 bfname = symtab_to_fullname (symbol_symtab (sym));
210 fname = symtab_to_fullname (symbol_symtab (fun));
211 if (filename_cmp (fname, bfname) != 0)
212 return 1;
213 }
214
215 /* If we lost symbol information, we switched functions. */
216 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
217 return 1;
218
219 /* If we gained symbol information, we switched functions. */
220 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
221 return 1;
222
223 return 0;
224 }
225
226 /* Allocate and initialize a new branch trace function segment at the end of
227 the trace.
228 BTINFO is the branch trace information for the current thread.
229 MFUN and FUN are the symbol information we have for this function.
230 This invalidates all struct btrace_function pointers currently held. */
231
232 static struct btrace_function *
233 ftrace_new_function (struct btrace_thread_info *btinfo,
234 struct minimal_symbol *mfun,
235 struct symbol *fun)
236 {
237 int level;
238 unsigned int number, insn_offset;
239
240 if (btinfo->functions.empty ())
241 {
242 /* Start counting NUMBER and INSN_OFFSET at one. */
243 level = 0;
244 number = 1;
245 insn_offset = 1;
246 }
247 else
248 {
249 const struct btrace_function *prev = &btinfo->functions.back ();
250 level = prev->level;
251 number = prev->number + 1;
252 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
253 }
254
255 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
256 return &btinfo->functions.back ();
257 }
258
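/* For illustration: NUMBER and INSN_OFFSET both start at one, and
   INSN_OFFSET accumulates the size of the preceding segment, where a gap
   counts as a single instruction (see ftrace_call_num_insn).  If, say,
   segment 1 holds three instructions, the next segment gets number = 2
   and insn_offset = 1 + 3 = 4; a segment following a subsequent gap
   would get insn_offset = 4 + 1 = 5.  */
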
259 /* Update the UP field of a function segment. */
260
261 static void
262 ftrace_update_caller (struct btrace_function *bfun,
263 struct btrace_function *caller,
264 enum btrace_function_flag flags)
265 {
266 if (bfun->up != 0)
267 ftrace_debug (bfun, "updating caller");
268
269 bfun->up = caller->number;
270 bfun->flags = flags;
271
272 ftrace_debug (bfun, "set caller");
273 ftrace_debug (caller, "..to");
274 }
275
276 /* Fix up the caller for all segments of a function. */
277
278 static void
279 ftrace_fixup_caller (struct btrace_thread_info *btinfo,
280 struct btrace_function *bfun,
281 struct btrace_function *caller,
282 enum btrace_function_flag flags)
283 {
284 unsigned int prev, next;
285
286 prev = bfun->prev;
287 next = bfun->next;
288 ftrace_update_caller (bfun, caller, flags);
289
290 /* Update all function segments belonging to the same function. */
291 for (; prev != 0; prev = bfun->prev)
292 {
293 bfun = ftrace_find_call_by_number (btinfo, prev);
294 ftrace_update_caller (bfun, caller, flags);
295 }
296
297 for (; next != 0; next = bfun->next)
298 {
299 bfun = ftrace_find_call_by_number (btinfo, next);
300 ftrace_update_caller (bfun, caller, flags);
301 }
302 }
303
304 /* Add a new function segment for a call at the end of the trace.
305 BTINFO is the branch trace information for the current thread.
306 MFUN and FUN are the symbol information we have for this function. */
307
308 static struct btrace_function *
309 ftrace_new_call (struct btrace_thread_info *btinfo,
310 struct minimal_symbol *mfun,
311 struct symbol *fun)
312 {
313 const unsigned int length = btinfo->functions.size ();
314 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
315
316 bfun->up = length;
317 bfun->level += 1;
318
319 ftrace_debug (bfun, "new call");
320
321 return bfun;
322 }
323
324 /* Add a new function segment for a tail call at the end of the trace.
325 BTINFO is the branch trace information for the current thread.
326 MFUN and FUN are the symbol information we have for this function. */
327
328 static struct btrace_function *
329 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
330 struct minimal_symbol *mfun,
331 struct symbol *fun)
332 {
333 const unsigned int length = btinfo->functions.size ();
334 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
335
336 bfun->up = length;
337 bfun->level += 1;
338 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
339
340 ftrace_debug (bfun, "new tail call");
341
342 return bfun;
343 }
344
345 /* Return the caller of BFUN or NULL if there is none. This function skips
346 tail calls in the call chain. BTINFO is the branch trace information for
347 the current thread. */
348 static struct btrace_function *
349 ftrace_get_caller (struct btrace_thread_info *btinfo,
350 struct btrace_function *bfun)
351 {
352 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
353 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
354 return ftrace_find_call_by_number (btinfo, bfun->up);
355
356 return NULL;
357 }
358
359 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
360 symbol information. BTINFO is the branch trace information for the current
361 thread. */
362
363 static struct btrace_function *
364 ftrace_find_caller (struct btrace_thread_info *btinfo,
365 struct btrace_function *bfun,
366 struct minimal_symbol *mfun,
367 struct symbol *fun)
368 {
369 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
370 {
371 /* Skip functions with incompatible symbol information. */
372 if (ftrace_function_switched (bfun, mfun, fun))
373 continue;
374
375 /* This is the function segment we're looking for. */
376 break;
377 }
378
379 return bfun;
380 }
381
382 /* Find the innermost caller in the back trace of BFUN, skipping all
383 function segments that do not end with a call instruction (e.g.
384 tail calls ending with a jump). BTINFO is the branch trace information for
385 the current thread. */
386
387 static struct btrace_function *
388 ftrace_find_call (struct btrace_thread_info *btinfo,
389 struct btrace_function *bfun)
390 {
391 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
392 {
393 struct btrace_insn *last;
394
395 /* Skip gaps. */
396 if (bfun->errcode != 0)
397 continue;
398
399 last = VEC_last (btrace_insn_s, bfun->insn);
400
401 if (last->iclass == BTRACE_INSN_CALL)
402 break;
403 }
404
405 return bfun;
406 }
407
408 /* Add a continuation segment for a function into which we return at the end of
409 the trace.
410 BTINFO is the branch trace information for the current thread.
411 MFUN and FUN are the symbol information we have for this function. */
412
413 static struct btrace_function *
414 ftrace_new_return (struct btrace_thread_info *btinfo,
415 struct minimal_symbol *mfun,
416 struct symbol *fun)
417 {
418 struct btrace_function *prev, *bfun, *caller;
419
420 bfun = ftrace_new_function (btinfo, mfun, fun);
421 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
422
423 /* It is important to start at PREV's caller. Otherwise, we might find
424 PREV itself, if PREV is a recursive function. */
425 caller = ftrace_find_call_by_number (btinfo, prev->up);
426 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
427 if (caller != NULL)
428 {
429 /* The caller of PREV is the preceding btrace function segment in this
430 function instance. */
431 gdb_assert (caller->next == 0);
432
433 caller->next = bfun->number;
434 bfun->prev = caller->number;
435
436 /* Maintain the function level. */
437 bfun->level = caller->level;
438
439 /* Maintain the call stack. */
440 bfun->up = caller->up;
441 bfun->flags = caller->flags;
442
443 ftrace_debug (bfun, "new return");
444 }
445 else
446 {
447 /* We did not find a caller. This could mean that something went
448 wrong or that the call is simply not included in the trace. */
449
450 /* Let's search for some actual call. */
451 caller = ftrace_find_call_by_number (btinfo, prev->up);
452 caller = ftrace_find_call (btinfo, caller);
453 if (caller == NULL)
454 {
455 /* There is no call in PREV's back trace. We assume that the
456 branch trace did not include it. */
457
458 /* Let's find the topmost function and add a new caller for it.
459 This should handle a series of initial tail calls. */
460 while (prev->up != 0)
461 prev = ftrace_find_call_by_number (btinfo, prev->up);
462
463 bfun->level = prev->level - 1;
464
465 /* Fix up the call stack for PREV. */
466 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
467
468 ftrace_debug (bfun, "new return - no caller");
469 }
470 else
471 {
472 /* There is a call in PREV's back trace to which we should have
473 returned but didn't. Let's start a new, separate back trace
474 from PREV's level. */
475 bfun->level = prev->level - 1;
476
477 /* We fix up the back trace for PREV but leave other function segments
478 on the same level as they are.
479 This should handle things like schedule () correctly where we're
480 switching contexts. */
481 prev->up = bfun->number;
482 prev->flags = BFUN_UP_LINKS_TO_RET;
483
484 ftrace_debug (bfun, "new return - unknown caller");
485 }
486 }
487
488 return bfun;
489 }
490
491 /* Add a new function segment for a function switch at the end of the trace.
492 BTINFO is the branch trace information for the current thread.
493 MFUN and FUN are the symbol information we have for this function. */
494
495 static struct btrace_function *
496 ftrace_new_switch (struct btrace_thread_info *btinfo,
497 struct minimal_symbol *mfun,
498 struct symbol *fun)
499 {
500 struct btrace_function *prev, *bfun;
501
502 /* This is an unexplained function switch. We can't really be sure about the
503 call stack; the best we can do is to preserve it. */
504 bfun = ftrace_new_function (btinfo, mfun, fun);
505 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
506 bfun->up = prev->up;
507 bfun->flags = prev->flags;
508
509 ftrace_debug (bfun, "new switch");
510
511 return bfun;
512 }
513
514 /* Add a new function segment for a gap in the trace due to a decode error at
515 the end of the trace.
516 BTINFO is the branch trace information for the current thread.
517 ERRCODE is the format-specific error code. */
518
519 static struct btrace_function *
520 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
521 std::vector<unsigned int> &gaps)
522 {
523 struct btrace_function *bfun;
524
525 if (btinfo->functions.empty ())
526 bfun = ftrace_new_function (btinfo, NULL, NULL);
527 else
528 {
529 /* We hijack the previous function segment if it was empty. */
530 bfun = &btinfo->functions.back ();
531 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
532 bfun = ftrace_new_function (btinfo, NULL, NULL);
533 }
534
535 bfun->errcode = errcode;
536 gaps.push_back (bfun->number);
537
538 ftrace_debug (bfun, "new gap");
539
540 return bfun;
541 }
542
543 /* Update the current function segment at the end of the trace in BTINFO with
544 respect to the instruction at PC. This may create new function segments.
545 Return the chronologically latest function segment, never NULL. */
546
547 static struct btrace_function *
548 ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
549 {
550 struct bound_minimal_symbol bmfun;
551 struct minimal_symbol *mfun;
552 struct symbol *fun;
553 struct btrace_insn *last;
554 struct btrace_function *bfun;
555
556 /* Try to determine the function we're in. We use both types of symbols
557 to avoid surprises when we sometimes get a full symbol and sometimes
558 only a minimal symbol. */
559 fun = find_pc_function (pc);
560 bmfun = lookup_minimal_symbol_by_pc (pc);
561 mfun = bmfun.minsym;
562
563 if (fun == NULL && mfun == NULL)
564 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
565
566 /* If we didn't have a function, we create one. */
567 if (btinfo->functions.empty ())
568 return ftrace_new_function (btinfo, mfun, fun);
569
570 /* If we had a gap before, we create a function. */
571 bfun = &btinfo->functions.back ();
572 if (bfun->errcode != 0)
573 return ftrace_new_function (btinfo, mfun, fun);
574
575 /* Check the last instruction, if we have one.
576 We do this check first, since it allows us to fill in the call stack
577 links in addition to the normal flow links. */
578 last = NULL;
579 if (!VEC_empty (btrace_insn_s, bfun->insn))
580 last = VEC_last (btrace_insn_s, bfun->insn);
581
582 if (last != NULL)
583 {
584 switch (last->iclass)
585 {
586 case BTRACE_INSN_RETURN:
587 {
588 const char *fname;
589
590 /* On some systems, _dl_runtime_resolve returns to the resolved
591 function instead of jumping to it. From our perspective,
592 however, this is a tailcall.
593 If we treated it as return, we wouldn't be able to find the
594 resolved function in our stack back trace. Hence, we would
595 lose the current stack back trace and start anew with an empty
596 back trace. When the resolved function returns, we would then
597 create a stack back trace with the same function names but
598 different frame ids. This will confuse stepping. */
599 fname = ftrace_print_function_name (bfun);
600 if (strcmp (fname, "_dl_runtime_resolve") == 0)
601 return ftrace_new_tailcall (btinfo, mfun, fun);
602
603 return ftrace_new_return (btinfo, mfun, fun);
604 }
605
606 case BTRACE_INSN_CALL:
607 /* Ignore calls to the next instruction. They are used for PIC. */
608 if (last->pc + last->size == pc)
609 break;
610
611 return ftrace_new_call (btinfo, mfun, fun);
612
613 case BTRACE_INSN_JUMP:
614 {
615 CORE_ADDR start;
616
617 start = get_pc_function_start (pc);
618
619 /* A jump to the start of a function is (typically) a tail call. */
620 if (start == pc)
621 return ftrace_new_tailcall (btinfo, mfun, fun);
622
623 /* If we can't determine the function for PC, we treat a jump at
624 the end of the block as a tail call if we're switching functions
625 and as an intra-function branch if we aren't. */
626 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
627 return ftrace_new_tailcall (btinfo, mfun, fun);
628
629 break;
630 }
631 }
632 }
633
634 /* Check if we're switching functions for some other reason. */
635 if (ftrace_function_switched (bfun, mfun, fun))
636 {
637 DEBUG_FTRACE ("switching from %s in %s at %s",
638 ftrace_print_insn_addr (last),
639 ftrace_print_function_name (bfun),
640 ftrace_print_filename (bfun));
641
642 return ftrace_new_switch (btinfo, mfun, fun);
643 }
644
645 return bfun;
646 }
647
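/* For illustration: a recorded JUMP whose target is the start of a
   function is treated as a tail call, while a CALL to the immediately
   following instruction (a PIC idiom for obtaining the current pc) is
   not treated as a call at all and stays within the current segment.  */
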
648 /* Add the instruction at PC to BFUN's instructions. */
649
650 static void
651 ftrace_update_insns (struct btrace_function *bfun,
652 const struct btrace_insn *insn)
653 {
654 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
655
656 if (record_debug > 1)
657 ftrace_debug (bfun, "update insn");
658 }
659
660 /* Classify the instruction at PC. */
661
662 static enum btrace_insn_class
663 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
664 {
665 enum btrace_insn_class iclass;
666
667 iclass = BTRACE_INSN_OTHER;
668 TRY
669 {
670 if (gdbarch_insn_is_call (gdbarch, pc))
671 iclass = BTRACE_INSN_CALL;
672 else if (gdbarch_insn_is_ret (gdbarch, pc))
673 iclass = BTRACE_INSN_RETURN;
674 else if (gdbarch_insn_is_jump (gdbarch, pc))
675 iclass = BTRACE_INSN_JUMP;
676 }
677 CATCH (error, RETURN_MASK_ERROR)
678 {
679 }
680 END_CATCH
681
682 return iclass;
683 }
684
685 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
686 number of matching function segments or zero if the back traces do not
687 match. BTINFO is the branch trace information for the current thread. */
688
689 static int
690 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
691 struct btrace_function *lhs,
692 struct btrace_function *rhs)
693 {
694 int matches;
695
696 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
697 {
698 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
699 return 0;
700
701 lhs = ftrace_get_caller (btinfo, lhs);
702 rhs = ftrace_get_caller (btinfo, rhs);
703 }
704
705 return matches;
706 }
707
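/* For illustration: matching the back trace main > foo against
   main > bar > foo starts at the innermost segments; foo matches foo,
   but walking up yields main vs. bar, a symbol switch, so the result is
   zero.  Matching main > foo against main > foo yields two.  */
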
708 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
709 BTINFO is the branch trace information for the current thread. */
710
711 static void
712 ftrace_fixup_level (struct btrace_thread_info *btinfo,
713 struct btrace_function *bfun, int adjustment)
714 {
715 if (adjustment == 0)
716 return;
717
718 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
719 ftrace_debug (bfun, "..bfun");
720
721 while (bfun != NULL)
722 {
723 bfun->level += adjustment;
724 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
725 }
726 }
727
728 /* Recompute the global level offset. Traverse the function trace and compute
729 the global level offset as the negative of the minimal function level. */
730
731 static void
732 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
733 {
734 int level = INT_MAX;
735
736 if (btinfo == NULL)
737 return;
738
739 if (btinfo->functions.empty ())
740 return;
741
742 unsigned int length = btinfo->functions.size () - 1;
743 for (unsigned int i = 0; i < length; ++i)
744 level = std::min (level, btinfo->functions[i].level);
745
746 /* The last function segment contains the current instruction, which is not
747 really part of the trace. If it contains just this one instruction, we
748 ignore the segment. */
749 struct btrace_function *last = &btinfo->functions.back ();
750 if (VEC_length (btrace_insn_s, last->insn) != 1)
751 level = std::min (level, last->level);
752
753 DEBUG_FTRACE ("setting global level offset: %d", -level);
754 btinfo->level = -level;
755 }
756
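/* For illustration: if the recorded segments sit at levels
   { 0, -2, -1, 0 }, the minimal level is -2 and the global level offset
   becomes 2, so presented function levels start at zero.  */
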
757 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
758 ftrace_connect_backtrace. BTINFO is the branch trace information for the
759 current thread. */
760
761 static void
762 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
763 struct btrace_function *prev,
764 struct btrace_function *next)
765 {
766 DEBUG_FTRACE ("connecting...");
767 ftrace_debug (prev, "..prev");
768 ftrace_debug (next, "..next");
769
770 /* The function segments are not yet connected. */
771 gdb_assert (prev->next == 0);
772 gdb_assert (next->prev == 0);
773
774 prev->next = next->number;
775 next->prev = prev->number;
776
777 /* We may have moved NEXT to a different function level. */
778 ftrace_fixup_level (btinfo, next, prev->level - next->level);
779
780 /* If we run out of back trace for one, let's use the other's. */
781 if (prev->up == 0)
782 {
783 const btrace_function_flags flags = next->flags;
784
785 next = ftrace_find_call_by_number (btinfo, next->up);
786 if (next != NULL)
787 {
788 DEBUG_FTRACE ("using next's callers");
789 ftrace_fixup_caller (btinfo, prev, next, flags);
790 }
791 }
792 else if (next->up == 0)
793 {
794 const btrace_function_flags flags = prev->flags;
795
796 prev = ftrace_find_call_by_number (btinfo, prev->up);
797 if (prev != NULL)
798 {
799 DEBUG_FTRACE ("using prev's callers");
800 ftrace_fixup_caller (btinfo, next, prev, flags);
801 }
802 }
803 else
804 {
805 /* PREV may have a tailcall caller, NEXT can't. If it does, fix up the up
806 link to add the tail callers to NEXT's back trace.
807
808 This removes NEXT->UP from NEXT's back trace. It will be added back
809 when connecting NEXT and PREV's callers - provided they exist.
810
811 If PREV's back trace consists of a series of tail calls without an
812 actual call, there will be no further connection and NEXT's caller will
813 be removed for good. To catch this case, we handle it here and connect
814 the top of PREV's back trace to NEXT's caller. */
815 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
816 {
817 struct btrace_function *caller;
818 btrace_function_flags next_flags, prev_flags;
819
820 /* We checked NEXT->UP above so CALLER can't be NULL. */
821 caller = ftrace_find_call_by_number (btinfo, next->up);
822 next_flags = next->flags;
823 prev_flags = prev->flags;
824
825 DEBUG_FTRACE ("adding prev's tail calls to next");
826
827 prev = ftrace_find_call_by_number (btinfo, prev->up);
828 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
829
830 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
831 prev->up))
832 {
833 /* At the end of PREV's back trace, continue with CALLER. */
834 if (prev->up == 0)
835 {
836 DEBUG_FTRACE ("fixing up link for tailcall chain");
837 ftrace_debug (prev, "..top");
838 ftrace_debug (caller, "..up");
839
840 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
841
842 /* If we skipped any tail calls, this may move CALLER to a
843 different function level.
844
845 Note that changing CALLER's level is only OK because we
846 know that this is the last iteration of the bottom-to-top
847 walk in ftrace_connect_backtrace.
848
849 Otherwise we will fix up CALLER's level when we connect it
850 to PREV's caller in the next iteration. */
851 ftrace_fixup_level (btinfo, caller,
852 prev->level - caller->level - 1);
853 break;
854 }
855
856 /* There's nothing to do if we find a real call. */
857 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
858 {
859 DEBUG_FTRACE ("will fix up link in next iteration");
860 break;
861 }
862 }
863 }
864 }
865 }
866
867 /* Connect function segments on the same level in the back trace at LHS and RHS.
868 The back traces at LHS and RHS are expected to match according to
869 ftrace_match_backtrace. BTINFO is the branch trace information for the
870 current thread. */
871
872 static void
873 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
874 struct btrace_function *lhs,
875 struct btrace_function *rhs)
876 {
877 while (lhs != NULL && rhs != NULL)
878 {
879 struct btrace_function *prev, *next;
880
881 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
882
883 /* Connecting LHS and RHS may change the up link. */
884 prev = lhs;
885 next = rhs;
886
887 lhs = ftrace_get_caller (btinfo, lhs);
888 rhs = ftrace_get_caller (btinfo, rhs);
889
890 ftrace_connect_bfun (btinfo, prev, next);
891 }
892 }
893
894 /* Bridge the gap between two function segments left and right of a gap if their
895 respective back traces match in at least MIN_MATCHES functions. BTINFO is
896 the branch trace information for the current thread.
897
898 Returns non-zero if the gap could be bridged, zero otherwise. */
899
900 static int
901 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
902 struct btrace_function *lhs, struct btrace_function *rhs,
903 int min_matches)
904 {
905 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
906 int best_matches;
907
908 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
909 rhs->insn_offset - 1, min_matches);
910
911 best_matches = 0;
912 best_l = NULL;
913 best_r = NULL;
914
915 /* We search the back traces of LHS and RHS for valid connections and connect
916 the two function segments that give the longest combined back trace. */
917
918 for (cand_l = lhs; cand_l != NULL;
919 cand_l = ftrace_get_caller (btinfo, cand_l))
920 for (cand_r = rhs; cand_r != NULL;
921 cand_r = ftrace_get_caller (btinfo, cand_r))
922 {
923 int matches;
924
925 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
926 if (best_matches < matches)
927 {
928 best_matches = matches;
929 best_l = cand_l;
930 best_r = cand_r;
931 }
932 }
933
934 /* We need at least MIN_MATCHES matches. */
935 gdb_assert (min_matches > 0);
936 if (best_matches < min_matches)
937 return 0;
938
939 DEBUG_FTRACE ("..matches: %d", best_matches);
940
941 /* We will fix up the level of BEST_R and succeeding function segments such
942 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
943
944 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
945 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
946
947 To catch this, we already fix up the level here where we can start at RHS
948 instead of at BEST_R. We will ignore the level fixup when connecting
949 BEST_L to BEST_R as they will already be on the same level. */
950 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
951
952 ftrace_connect_backtrace (btinfo, best_l, best_r);
953
954 return best_matches;
955 }
956
957 /* Try to bridge gaps due to overflow or decode errors by connecting the
958 function segments that are separated by the gap. */
959
960 static void
961 btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
962 {
963 struct btrace_thread_info *btinfo = &tp->btrace;
964 std::vector<unsigned int> remaining;
965 int min_matches;
966
967 DEBUG ("bridge gaps");
968
969 /* We require a minimum amount of matches for bridging a gap. The number of
970 required matches will be lowered with each iteration.
971
972 The more matches the higher our confidence that the bridging is correct.
973 For big gaps or small traces, however, it may not be feasible to require a
974 high number of matches. */
975 for (min_matches = 5; min_matches > 0; --min_matches)
976 {
977 /* Let's try to bridge as many gaps as we can. In some cases, we need to
978 skip a gap and revisit it after we have closed later gaps. */
979 while (!gaps.empty ())
980 {
981 for (const unsigned int number : gaps)
982 {
983 struct btrace_function *gap, *lhs, *rhs;
984 int bridged;
985
986 gap = ftrace_find_call_by_number (btinfo, number);
987
988 /* We may have a sequence of gaps if we run from one error into
989 the next as we try to re-sync onto the trace stream. Ignore
990 all but the leftmost gap in such a sequence.
991
992 Also ignore gaps at the beginning of the trace. */
993 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
994 if (lhs == NULL || lhs->errcode != 0)
995 continue;
996
997 /* Skip gaps to the right. */
998 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
999 while (rhs != NULL && rhs->errcode != 0)
1000 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1001
1002 /* Ignore gaps at the end of the trace. */
1003 if (rhs == NULL)
1004 continue;
1005
1006 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1007
1008 /* Keep track of gaps we were not able to bridge and try again.
1009 If we just pushed them to the end of GAPS we would risk an
1010 infinite loop in case we simply cannot bridge a gap. */
1011 if (bridged == 0)
1012 remaining.push_back (number);
1013 }
1014
1015 /* Let's see if we made any progress. */
1016 if (remaining.size () == gaps.size ())
1017 break;
1018
1019 gaps.clear ();
1020 gaps.swap (remaining);
1021 }
1022
1023 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1024 if (gaps.empty ())
1025 break;
1026
1027 remaining.clear ();
1028 }
1029
1030 /* We may omit this in some cases. Not sure it is worth the extra
1031 complication, though. */
1032 ftrace_compute_global_level_offset (btinfo);
1033 }
1034
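/* For illustration: bridging proceeds in rounds with a decreasing match
   threshold.  Say there are two gaps G1 and G2 where G1 only becomes
   bridgeable once G2 has been closed: within one round we try G1
   (fails), bridge G2, then retry G1 against the now longer trace.  Only
   when a full pass bridges nothing do we lower MIN_MATCHES, trading
   confidence for coverage.  */
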
1035 /* Compute the function branch trace from BTS trace. */
1036
1037 static void
1038 btrace_compute_ftrace_bts (struct thread_info *tp,
1039 const struct btrace_data_bts *btrace,
1040 std::vector<unsigned int> &gaps)
1041 {
1042 struct btrace_thread_info *btinfo;
1043 struct gdbarch *gdbarch;
1044 unsigned int blk;
1045 int level;
1046
1047 gdbarch = target_gdbarch ();
1048 btinfo = &tp->btrace;
1049 blk = VEC_length (btrace_block_s, btrace->blocks);
1050
1051 if (btinfo->functions.empty ())
1052 level = INT_MAX;
1053 else
1054 level = -btinfo->level;
1055
1056 while (blk != 0)
1057 {
1058 btrace_block_s *block;
1059 CORE_ADDR pc;
1060
1061 blk -= 1;
1062
1063 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1064 pc = block->begin;
1065
1066 for (;;)
1067 {
1068 struct btrace_function *bfun;
1069 struct btrace_insn insn;
1070 int size;
1071
1072 /* We should hit the end of the block. Warn if we went too far. */
1073 if (block->end < pc)
1074 {
1075 /* Indicate the gap in the trace. */
1076 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1077
1078 warning (_("Recorded trace may be corrupted at instruction "
1079 "%u (pc = %s)."), bfun->insn_offset - 1,
1080 core_addr_to_string_nz (pc));
1081
1082 break;
1083 }
1084
1085 bfun = ftrace_update_function (btinfo, pc);
1086
1087 /* Maintain the function level offset.
1088 For all but the last block, we do it here. */
1089 if (blk != 0)
1090 level = std::min (level, bfun->level);
1091
1092 size = 0;
1093 TRY
1094 {
1095 size = gdb_insn_length (gdbarch, pc);
1096 }
1097 CATCH (error, RETURN_MASK_ERROR)
1098 {
1099 }
1100 END_CATCH
1101
1102 insn.pc = pc;
1103 insn.size = size;
1104 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1105 insn.flags = 0;
1106
1107 ftrace_update_insns (bfun, &insn);
1108
1109 /* We're done once we pushed the instruction at the end. */
1110 if (block->end == pc)
1111 break;
1112
1113 /* We can't continue if we fail to compute the size. */
1114 if (size <= 0)
1115 {
1116 /* Indicate the gap in the trace. We just added INSN so we're
1117 not at the beginning. */
1118 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1119
1120 warning (_("Recorded trace may be incomplete at instruction %u "
1121 "(pc = %s)."), bfun->insn_offset - 1,
1122 core_addr_to_string_nz (pc));
1123
1124 break;
1125 }
1126
1127 pc += size;
1128
1129 /* Maintain the function level offset.
1130 For the last block, we do it here to not consider the last
1131 instruction.
1132 Since the last instruction corresponds to the current instruction
1133 and is not really part of the execution history, it shouldn't
1134 affect the level. */
1135 if (blk == 0)
1136 level = std::min (level, bfun->level);
1137 }
1138 }
1139
1140 /* LEVEL is the minimal function level of all btrace function segments.
1141 Define the global level offset to -LEVEL so all function levels are
1142 normalized to start at zero. */
1143 btinfo->level = -level;
1144 }
1145
1146 #if defined (HAVE_LIBIPT)
1147
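/* Translate an Intel Processor Trace instruction class to a btrace
   instruction class.  */
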
1148 static enum btrace_insn_class
1149 pt_reclassify_insn (enum pt_insn_class iclass)
1150 {
1151 switch (iclass)
1152 {
1153 case ptic_call:
1154 return BTRACE_INSN_CALL;
1155
1156 case ptic_return:
1157 return BTRACE_INSN_RETURN;
1158
1159 case ptic_jump:
1160 return BTRACE_INSN_JUMP;
1161
1162 default:
1163 return BTRACE_INSN_OTHER;
1164 }
1165 }
1166
1167 /* Return the btrace instruction flags for INSN. */
1168
1169 static btrace_insn_flags
1170 pt_btrace_insn_flags (const struct pt_insn &insn)
1171 {
1172 btrace_insn_flags flags = 0;
1173
1174 if (insn.speculative)
1175 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1176
1177 return flags;
1178 }
1179
1180 /* Return the btrace instruction for INSN. */
1181
1182 static btrace_insn
1183 pt_btrace_insn (const struct pt_insn &insn)
1184 {
1185 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1186 pt_reclassify_insn (insn.iclass),
1187 pt_btrace_insn_flags (insn)};
1188 }
1189
1190 /* Handle instruction decode events (libipt-v2). */
1191
1192 static int
1193 handle_pt_insn_events (struct btrace_thread_info *btinfo,
1194 struct pt_insn_decoder *decoder,
1195 std::vector<unsigned int> &gaps, int status)
1196 {
1197 #if defined (HAVE_PT_INSN_EVENT)
1198 while (status & pts_event_pending)
1199 {
1200 struct btrace_function *bfun;
1201 struct pt_event event;
1202 uint64_t offset;
1203
1204 status = pt_insn_event (decoder, &event, sizeof (event));
1205 if (status < 0)
1206 break;
1207
1208 switch (event.type)
1209 {
1210 default:
1211 break;
1212
1213 case ptev_enabled:
1214 if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
1215 {
1216 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1217
1218 pt_insn_get_offset (decoder, &offset);
1219
1220 warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
1221 PRIx64 ")."), bfun->insn_offset - 1, offset);
1222 }
1223
1224 break;
1225
1226 case ptev_overflow:
1227 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1228
1229 pt_insn_get_offset (decoder, &offset);
1230
1231 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
1232 bfun->insn_offset - 1, offset);
1233
1234 break;
1235 }
1236 }
1237 #endif /* defined (HAVE_PT_INSN_EVENT) */
1238
1239 return status;
1240 }
1241
1242 /* Handle events indicated by flags in INSN (libipt-v1). */
1243
1244 static void
1245 handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
1246 struct pt_insn_decoder *decoder,
1247 const struct pt_insn &insn,
1248 std::vector<unsigned int> &gaps)
1249 {
1250 #if defined (HAVE_STRUCT_PT_INSN_ENABLED)
1251 /* Tracing is disabled and re-enabled each time we enter the kernel. Most
1252 times, we continue from the same instruction we stopped before. This is
1253 indicated via the RESUMED instruction flag. The ENABLED instruction flag
1254 means that we continued from some other instruction. Indicate this as a
1255 trace gap except when tracing just started. */
1256 if (insn.enabled && !btinfo->functions.empty ())
1257 {
1258 struct btrace_function *bfun;
1259 uint64_t offset;
1260
1261 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1262
1263 pt_insn_get_offset (decoder, &offset);
1264
1265 warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
1266 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
1267 insn.ip);
1268 }
1269 #endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
1270
1271 #if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
1272 /* Indicate trace overflows. */
1273 if (insn.resynced)
1274 {
1275 struct btrace_function *bfun;
1276 uint64_t offset;
1277
1278 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1279
1280 pt_insn_get_offset (decoder, &offset);
1281
1282 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
1283 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
1284 }
1285 #endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
1286 }
1287
1288 /* Add function branch trace to BTINFO using DECODER. */
1289
1290 static void
1291 ftrace_add_pt (struct btrace_thread_info *btinfo,
1292 struct pt_insn_decoder *decoder,
1293 int *plevel,
1294 std::vector<unsigned int> &gaps)
1295 {
1296 struct btrace_function *bfun;
1297 uint64_t offset;
1298 int status;
1299
1300 for (;;)
1301 {
1302 struct pt_insn insn;
1303
1304 status = pt_insn_sync_forward (decoder);
1305 if (status < 0)
1306 {
1307 if (status != -pte_eos)
1308 warning (_("Failed to synchronize onto the Intel Processor "
1309 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
1310 break;
1311 }
1312
1313 for (;;)
1314 {
1315 /* Handle events from the previous iteration or synchronization. */
1316 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1317 if (status < 0)
1318 break;
1319
1320 status = pt_insn_next (decoder, &insn, sizeof (insn));
1321 if (status < 0)
1322 break;
1323
1324 /* Handle events indicated by flags in INSN. */
1325 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
1326
1327 bfun = ftrace_update_function (btinfo, insn.ip);
1328
1329 /* Maintain the function level offset. */
1330 *plevel = std::min (*plevel, bfun->level);
1331
1332 btrace_insn btinsn = pt_btrace_insn (insn);
1333 ftrace_update_insns (bfun, &btinsn);
1334 }
1335
1336 if (status == -pte_eos)
1337 break;
1338
1339 /* Indicate the gap in the trace. */
1340 bfun = ftrace_new_gap (btinfo, status, gaps);
1341
1342 pt_insn_get_offset (decoder, &offset);
1343
1344 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1345 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1346 offset, insn.ip, pt_errstr (pt_errcode (status)));
1347 }
1348 }
1349
1350 /* A callback function to allow the trace decoder to read the inferior's
1351 memory. */
1352
1353 static int
1354 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1355 const struct pt_asid *asid, uint64_t pc,
1356 void *context)
1357 {
1358 int result, errcode;
1359
1360 result = (int) size;
1361 TRY
1362 {
1363 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1364 if (errcode != 0)
1365 result = -pte_nomap;
1366 }
1367 CATCH (error, RETURN_MASK_ERROR)
1368 {
1369 result = -pte_nomap;
1370 }
1371 END_CATCH
1372
1373 return result;
1374 }
1375
1376 /* Translate the vendor from one enum to another. */
1377
1378 static enum pt_cpu_vendor
1379 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1380 {
1381 switch (vendor)
1382 {
1383 default:
1384 return pcv_unknown;
1385
1386 case CV_INTEL:
1387 return pcv_intel;
1388 }
1389 }
1390
1391 /* Finalize the function branch trace after decode. */
1392
1393 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1394 struct thread_info *tp, int level)
1395 {
1396 pt_insn_free_decoder (decoder);
1397
1398 /* LEVEL is the minimal function level of all btrace function segments.
1399 Define the global level offset to -LEVEL so all function levels are
1400 normalized to start at zero. */
1401 tp->btrace.level = -level;
1402
1403 /* Add a single last instruction entry for the current PC.
1404 This allows us to compute the backtrace at the current PC using both
1405 standard unwind and btrace unwind.
1406 This extra entry is ignored by all record commands. */
1407 btrace_add_pc (tp);
1408 }
1409
1410 /* Compute the function branch trace from Intel Processor Trace
1411 format. */
1412
1413 static void
1414 btrace_compute_ftrace_pt (struct thread_info *tp,
1415 const struct btrace_data_pt *btrace,
1416 std::vector<unsigned int> &gaps)
1417 {
1418 struct btrace_thread_info *btinfo;
1419 struct pt_insn_decoder *decoder;
1420 struct pt_config config;
1421 int level, errcode;
1422
1423 if (btrace->size == 0)
1424 return;
1425
1426 btinfo = &tp->btrace;
1427 if (btinfo->functions.empty ())
1428 level = INT_MAX;
1429 else
1430 level = -btinfo->level;
1431
1432 pt_config_init (&config);
1433 config.begin = btrace->data;
1434 config.end = btrace->data + btrace->size;
1435
1436 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1437 config.cpu.family = btrace->config.cpu.family;
1438 config.cpu.model = btrace->config.cpu.model;
1439 config.cpu.stepping = btrace->config.cpu.stepping;
1440
1441 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1442 if (errcode < 0)
1443 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1444 pt_errstr (pt_errcode (errcode)));
1445
1446 decoder = pt_insn_alloc_decoder (&config);
1447 if (decoder == NULL)
1448 error (_("Failed to allocate the Intel Processor Trace decoder."));
1449
1450 TRY
1451 {
1452 struct pt_image *image;
1453
1454 image = pt_insn_get_image (decoder);
1455 if (image == NULL)
1456 error (_("Failed to configure the Intel Processor Trace decoder."));
1457
1458 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1459 if (errcode < 0)
1460 error (_("Failed to configure the Intel Processor Trace decoder: "
1461 "%s."), pt_errstr (pt_errcode (errcode)));
1462
1463 ftrace_add_pt (btinfo, decoder, &level, gaps);
1464 }
1465 CATCH (error, RETURN_MASK_ALL)
1466 {
1467 /* Indicate a gap in the trace if we quit trace processing. */
1468 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1469 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1470
1471 btrace_finalize_ftrace_pt (decoder, tp, level);
1472
1473 throw_exception (error);
1474 }
1475 END_CATCH
1476
1477 btrace_finalize_ftrace_pt (decoder, tp, level);
1478 }
1479
1480 #else /* defined (HAVE_LIBIPT) */
1481
1482 static void
1483 btrace_compute_ftrace_pt (struct thread_info *tp,
1484 const struct btrace_data_pt *btrace,
1485 std::vector<unsigned int> &gaps)
1486 {
1487 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1488 }
1489
1490 #endif /* defined (HAVE_LIBIPT) */
1491
1492 /* Compute the function branch trace from a block branch trace BTRACE for
1493 the thread TP. */
1494
1495 static void
1496 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1497 std::vector<unsigned int> &gaps)
1498 {
1499 DEBUG ("compute ftrace");
1500
1501 switch (btrace->format)
1502 {
1503 case BTRACE_FORMAT_NONE:
1504 return;
1505
1506 case BTRACE_FORMAT_BTS:
1507 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1508 return;
1509
1510 case BTRACE_FORMAT_PT:
1511 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1512 return;
1513 }
1514
1515 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1516 }
1517
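/* Update the gap statistics in TP's branch trace and try to bridge the
   recorded GAPS.  */
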
1518 static void
1519 btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1520 {
1521 if (!gaps.empty ())
1522 {
1523 tp->btrace.ngaps += gaps.size ();
1524 btrace_bridge_gaps (tp, gaps);
1525 }
1526 }
1527
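/* Compute the function branch trace for TP from the raw trace data
   BTRACE, bridging any decode gaps.  */
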
1528 static void
1529 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1530 {
1531 std::vector<unsigned int> gaps;
1532
1533 TRY
1534 {
1535 btrace_compute_ftrace_1 (tp, btrace, gaps);
1536 }
1537 CATCH (error, RETURN_MASK_ALL)
1538 {
1539 btrace_finalize_ftrace (tp, gaps);
1540
1541 throw_exception (error);
1542 }
1543 END_CATCH
1544
1545 btrace_finalize_ftrace (tp, gaps);
1546 }
1547
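/* Note that btrace_finalize_ftrace runs on both the error and the
   success path above, so gaps recorded before a decode error are still
   counted and bridged before the error is re-thrown.  */
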
1548 /* Add an entry for the current PC. */
1549
1550 static void
1551 btrace_add_pc (struct thread_info *tp)
1552 {
1553 struct btrace_data btrace;
1554 struct btrace_block *block;
1555 struct regcache *regcache;
1556 struct cleanup *cleanup;
1557 CORE_ADDR pc;
1558
1559 regcache = get_thread_regcache (tp->ptid);
1560 pc = regcache_read_pc (regcache);
1561
1562 btrace_data_init (&btrace);
1563 btrace.format = BTRACE_FORMAT_BTS;
1564 btrace.variant.bts.blocks = NULL;
1565
1566 cleanup = make_cleanup_btrace_data (&btrace);
1567
1568 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1569 block->begin = pc;
1570 block->end = pc;
1571
1572 btrace_compute_ftrace (tp, &btrace);
1573
1574 do_cleanups (cleanup);
1575 }
1576
1577 /* See btrace.h. */
1578
1579 void
1580 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1581 {
1582 if (tp->btrace.target != NULL)
1583 return;
1584
1585 #if !defined (HAVE_LIBIPT)
1586 if (conf->format == BTRACE_FORMAT_PT)
1587 error (_("GDB does not support Intel Processor Trace."));
1588 #endif /* !defined (HAVE_LIBIPT) */
1589
1590 if (!target_supports_btrace (conf->format))
1591 error (_("Target does not support branch tracing."));
1592
1593 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1594 target_pid_to_str (tp->ptid));
1595
1596 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1597
1598 /* We're done if we failed to enable tracing. */
1599 if (tp->btrace.target == NULL)
1600 return;
1601
1602 /* We need to undo the enable in case of errors. */
1603 TRY
1604 {
1605 /* Add an entry for the current PC so we start tracing from where we
1606 enabled it.
1607
1608 If we can't access TP's registers, TP is most likely running. In this
1609 case, we can't really say where tracing was enabled so it should be
1610 safe to simply skip this step.
1611
1612 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1613 start at the PC at which tracing was enabled. */
1614 if (conf->format != BTRACE_FORMAT_PT
1615 && can_access_registers_ptid (tp->ptid))
1616 btrace_add_pc (tp);
1617 }
1618 CATCH (exception, RETURN_MASK_ALL)
1619 {
1620 btrace_disable (tp);
1621
1622 throw_exception (exception);
1623 }
1624 END_CATCH
1625 }
1626
1627 /* See btrace.h. */
1628
1629 const struct btrace_config *
1630 btrace_conf (const struct btrace_thread_info *btinfo)
1631 {
1632 if (btinfo->target == NULL)
1633 return NULL;
1634
1635 return target_btrace_conf (btinfo->target);
1636 }
1637
1638 /* See btrace.h. */
1639
1640 void
1641 btrace_disable (struct thread_info *tp)
1642 {
1643 struct btrace_thread_info *btp = &tp->btrace;
1644 int errcode = 0;
1645
1646 if (btp->target == NULL)
1647 return;
1648
1649 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1650 target_pid_to_str (tp->ptid));
1651
1652 target_disable_btrace (btp->target);
1653 btp->target = NULL;
1654
1655 btrace_clear (tp);
1656 }
1657
1658 /* See btrace.h. */
1659
1660 void
1661 btrace_teardown (struct thread_info *tp)
1662 {
1663 struct btrace_thread_info *btp = &tp->btrace;
1664 int errcode = 0;
1665
1666 if (btp->target == NULL)
1667 return;
1668
1669 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1670 target_pid_to_str (tp->ptid));
1671
1672 target_teardown_btrace (btp->target);
1673 btp->target = NULL;
1674
1675 btrace_clear (tp);
1676 }
1677
1678 /* Stitch branch trace in BTS format. */
1679
1680 static int
1681 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1682 {
1683 struct btrace_thread_info *btinfo;
1684 struct btrace_function *last_bfun;
1685 struct btrace_insn *last_insn;
1686 btrace_block_s *first_new_block;
1687
1688 btinfo = &tp->btrace;
1689 gdb_assert (!btinfo->functions.empty ());
1690 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1691
1692 last_bfun = &btinfo->functions.back ();
1693
1694 /* If the existing trace ends with a gap, we just glue the traces
1695 together. We need to drop the last (i.e. chronologically first) block
1696 of the new trace, though, since we can't fill in the start address. */
1697 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1698 {
1699 VEC_pop (btrace_block_s, btrace->blocks);
1700 return 0;
1701 }
1702
1703 /* Beware that block trace starts with the most recent block, so the
1704 chronologically first block in the new trace is the last block in
1705 the new trace's block vector. */
1706 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1707 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1708
1709 /* If the current PC at the end of the block is the same as in our current
1710 trace, there are two explanations:
1711 1. we executed the instruction and some branch brought us back.
1712 2. we have not made any progress.
1713 In the first case, the delta trace vector should contain at least two
1714 entries.
1715 In the second case, the delta trace vector should contain exactly one
1716 entry for the partial block containing the current PC. Remove it. */
1717 if (first_new_block->end == last_insn->pc
1718 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1719 {
1720 VEC_pop (btrace_block_s, btrace->blocks);
1721 return 0;
1722 }
1723
1724 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1725 core_addr_to_string_nz (first_new_block->end));
1726
1727 /* Do a simple sanity check to make sure we don't accidentally end up
1728 with a bad block. This should not occur in practice. */
1729 if (first_new_block->end < last_insn->pc)
1730 {
1731 warning (_("Error while trying to read delta trace. Falling back to "
1732 "a full read."));
1733 return -1;
1734 }
1735
1736 /* We adjust the last block to start at the end of our current trace. */
1737 gdb_assert (first_new_block->begin == 0);
1738 first_new_block->begin = last_insn->pc;
1739
1740 /* We simply pop the last insn so we can insert it again as part of
1741 the normal branch trace computation.
1742 Since instruction iterators are based on indices in the instructions
1743 vector, we don't leave any pointers dangling. */
1744 DEBUG ("pruning insn at %s for stitching",
1745 ftrace_print_insn_addr (last_insn));
1746
1747 VEC_pop (btrace_insn_s, last_bfun->insn);
1748
1749 /* The instructions vector may become empty temporarily if this has
1750 been the only instruction in this function segment.
1751 This violates the invariant but will be remedied shortly by
1752 btrace_compute_ftrace when we add the new trace. */
1753
1754 /* The only case where this would hurt is if the entire trace consisted
1755 of just that one instruction. If we remove it, we might turn the now
1756 empty btrace function segment into a gap. But we don't want gaps at
1757 the beginning. To avoid this, we remove the entire old trace. */
1758 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
1759 btrace_clear (tp);
1760
1761 return 0;
1762 }
1763
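/* For illustration: suppose the old trace ends at pc 0x40100 and a delta
   read returns the blocks (most recent first)

     [begin = 0x40200, end = 0x40210], [begin = 0, end = 0x40110]

   The chronologically first block has no start address; stitching sets
   its begin to 0x40100 and pops the last old instruction so that it is
   re-added as part of the normal trace computation.  */
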
1764 /* Adjust the block trace in order to stitch old and new trace together.
1765 BTRACE is the new delta trace between the last and the current stop.
1766 TP is the traced thread.
1767 May modify BTRACE as well as the existing trace in TP.
1768 Return 0 on success, -1 otherwise. */
1769
1770 static int
1771 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1772 {
1773 /* If we don't have trace, there's nothing to do. */
1774 if (btrace_data_empty (btrace))
1775 return 0;
1776
1777 switch (btrace->format)
1778 {
1779 case BTRACE_FORMAT_NONE:
1780 return 0;
1781
1782 case BTRACE_FORMAT_BTS:
1783 return btrace_stitch_bts (&btrace->variant.bts, tp);
1784
1785 case BTRACE_FORMAT_PT:
1786 /* Delta reads are not supported. */
1787 return -1;
1788 }
1789
1790 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1791 }
1792
1793 /* Clear the branch trace histories in BTINFO. */
1794
1795 static void
1796 btrace_clear_history (struct btrace_thread_info *btinfo)
1797 {
1798 xfree (btinfo->insn_history);
1799 xfree (btinfo->call_history);
1800 xfree (btinfo->replay);
1801
1802 btinfo->insn_history = NULL;
1803 btinfo->call_history = NULL;
1804 btinfo->replay = NULL;
1805 }
1806
1807 /* Clear the branch trace maintenance histories in BTINFO. */
1808
1809 static void
1810 btrace_maint_clear (struct btrace_thread_info *btinfo)
1811 {
1812 switch (btinfo->data.format)
1813 {
1814 default:
1815 break;
1816
1817 case BTRACE_FORMAT_BTS:
1818 btinfo->maint.variant.bts.packet_history.begin = 0;
1819 btinfo->maint.variant.bts.packet_history.end = 0;
1820 break;
1821
1822 #if defined (HAVE_LIBIPT)
1823 case BTRACE_FORMAT_PT:
1824 xfree (btinfo->maint.variant.pt.packets);
1825
1826 btinfo->maint.variant.pt.packets = NULL;
1827 btinfo->maint.variant.pt.packet_history.begin = 0;
1828 btinfo->maint.variant.pt.packet_history.end = 0;
1829 break;
1830 #endif /* defined (HAVE_LIBIPT) */
1831 }
1832 }
1833
1834 /* See btrace.h. */
1835
1836 const char *
1837 btrace_decode_error (enum btrace_format format, int errcode)
1838 {
1839 switch (format)
1840 {
1841 case BTRACE_FORMAT_BTS:
1842 switch (errcode)
1843 {
1844 case BDE_BTS_OVERFLOW:
1845 return _("instruction overflow");
1846
1847 case BDE_BTS_INSN_SIZE:
1848 return _("unknown instruction");
1849
1850 default:
1851 break;
1852 }
1853 break;
1854
1855 #if defined (HAVE_LIBIPT)
1856 case BTRACE_FORMAT_PT:
1857 switch (errcode)
1858 {
1859 case BDE_PT_USER_QUIT:
1860 return _("trace decode cancelled");
1861
1862 case BDE_PT_DISABLED:
1863 return _("disabled");
1864
1865 case BDE_PT_OVERFLOW:
1866 return _("overflow");
1867
1868 default:
1869 if (errcode < 0)
1870 return pt_errstr (pt_errcode (errcode));
1871 break;
1872 }
1873 break;
1874 #endif /* defined (HAVE_LIBIPT) */
1875
1876 default:
1877 break;
1878 }
1879
1880 return _("unknown");
1881 }
1882
1883 /* See btrace.h. */
1884
1885 void
1886 btrace_fetch (struct thread_info *tp)
1887 {
1888 struct btrace_thread_info *btinfo;
1889 struct btrace_target_info *tinfo;
1890 struct btrace_data btrace;
1891 struct cleanup *cleanup;
1892 int errcode;
1893
1894 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1895 target_pid_to_str (tp->ptid));
1896
1897 btinfo = &tp->btrace;
1898 tinfo = btinfo->target;
1899 if (tinfo == NULL)
1900 return;
1901
1902 /* There's no way we could get new trace while replaying.
1903 On the other hand, delta trace would return a partial record with the
1904 current PC, which is the replay PC, not the last PC, as expected. */
1905 if (btinfo->replay != NULL)
1906 return;
1907
1908 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1909 can store a gdb.Record object in Python referring to a different thread
1910 than the current one, temporarily set INFERIOR_PTID. */
1911 cleanup = save_inferior_ptid ();
1912 inferior_ptid = tp->ptid;
1913
1914 /* We should not be called on running or exited threads. */
1915 gdb_assert (can_access_registers_ptid (tp->ptid));
1916
1917 btrace_data_init (&btrace);
1918 make_cleanup_btrace_data (&btrace);
1919
1920 /* Let's first try to extend the trace we already have. */
1921 if (!btinfo->functions.empty ())
1922 {
1923 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1924 if (errcode == 0)
1925 {
1926 /* Success. Let's try to stitch the traces together. */
1927 errcode = btrace_stitch_trace (&btrace, tp);
1928 }
1929 else
1930 {
1931 /* We failed to read delta trace. Let's try to read new trace. */
1932 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1933
1934 /* If we got any new trace, discard what we have. */
1935 if (errcode == 0 && !btrace_data_empty (&btrace))
1936 btrace_clear (tp);
1937 }
1938
1939 /* If we were not able to read the trace, we start over. */
1940 if (errcode != 0)
1941 {
1942 btrace_clear (tp);
1943 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1944 }
1945 }
1946 else
1947 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1948
1949 /* If we were not able to read the branch trace, signal an error. */
1950 if (errcode != 0)
1951 error (_("Failed to read branch trace."));
1952
1953 /* Compute the trace, provided we have any. */
1954 if (!btrace_data_empty (&btrace))
1955 {
1956 /* Store the raw trace data. The stored data will be cleared in
1957 btrace_clear, so we always append the new trace. */
1958 btrace_data_append (&btinfo->data, &btrace);
1959 btrace_maint_clear (btinfo);
1960
1961 btrace_clear_history (btinfo);
1962 btrace_compute_ftrace (tp, &btrace);
1963 }
1964
1965 do_cleanups (cleanup);
1966 }
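
/* Example (illustrative sketch): a typical caller refreshes the trace and
   then checks whether anything was recorded:

     btrace_fetch (tp);
     if (btrace_is_empty (tp))
       printf_unfiltered (_("No trace.\n"));
*/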
1967
1968 /* See btrace.h. */
1969
1970 void
1971 btrace_clear (struct thread_info *tp)
1972 {
1973 struct btrace_thread_info *btinfo;
1974
1975 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1976 target_pid_to_str (tp->ptid));
1977
1978 /* Make sure btrace frames that may hold a pointer into the branch
1979 trace data are destroyed. */
1980 reinit_frame_cache ();
1981
1982 btinfo = &tp->btrace;
1983 for (auto &bfun : btinfo->functions)
1984 VEC_free (btrace_insn_s, bfun.insn);
1985
1986 btinfo->functions.clear ();
1987 btinfo->ngaps = 0;
1988
1989 /* Clear the maint data first - it depends on BTINFO->DATA. */
1990 btrace_maint_clear (btinfo);
1991 btrace_data_clear (&btinfo->data);
1992 btrace_clear_history (btinfo);
1993 }
1994
1995 /* See btrace.h. */
1996
1997 void
1998 btrace_free_objfile (struct objfile *objfile)
1999 {
2000 struct thread_info *tp;
2001
2002 DEBUG ("free objfile");
2003
2004 ALL_NON_EXITED_THREADS (tp)
2005 btrace_clear (tp);
2006 }
2007
2008 #if defined (HAVE_LIBEXPAT)
2009
2010 /* Check the btrace document version. */
2011
2012 static void
2013 check_xml_btrace_version (struct gdb_xml_parser *parser,
2014 const struct gdb_xml_element *element,
2015 void *user_data, VEC (gdb_xml_value_s) *attributes)
2016 {
2017 const char *version
2018 = (const char *) xml_find_attribute (attributes, "version")->value;
2019
2020 if (strcmp (version, "1.0") != 0)
2021 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
2022 }
2023
2024 /* Parse a btrace "block" xml record. */
2025
2026 static void
2027 parse_xml_btrace_block (struct gdb_xml_parser *parser,
2028 const struct gdb_xml_element *element,
2029 void *user_data, VEC (gdb_xml_value_s) *attributes)
2030 {
2031 struct btrace_data *btrace;
2032 struct btrace_block *block;
2033 ULONGEST *begin, *end;
2034
2035 btrace = (struct btrace_data *) user_data;
2036
2037 switch (btrace->format)
2038 {
2039 case BTRACE_FORMAT_BTS:
2040 break;
2041
2042 case BTRACE_FORMAT_NONE:
2043 btrace->format = BTRACE_FORMAT_BTS;
2044 btrace->variant.bts.blocks = NULL;
2045 break;
2046
2047 default:
2048 gdb_xml_error (parser, _("Btrace format error."));
2049 }
2050
2051 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
2052 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
2053
2054 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
2055 block->begin = *begin;
2056 block->end = *end;
2057 }
2058
2059 /* Parse a "raw" xml record. */
2060
2061 static void
2062 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
2063 gdb_byte **pdata, size_t *psize)
2064 {
2065 struct cleanup *cleanup;
2066 gdb_byte *data, *bin;
2067 size_t len, size;
2068
2069 len = strlen (body_text);
2070 if (len % 2 != 0)
2071 gdb_xml_error (parser, _("Bad raw data size."));
2072
2073 size = len / 2;
2074
2075 bin = data = (gdb_byte *) xmalloc (size);
2076 cleanup = make_cleanup (xfree, data);
2077
2078 /* We use hex encoding - see common/rsp-low.h. */
2079 while (len > 0)
2080 {
2081 char hi, lo;
2082
2083 hi = *body_text++;
2084 lo = *body_text++;
2085
2086 if (hi == 0 || lo == 0)
2087 gdb_xml_error (parser, _("Bad hex encoding."));
2088
2089 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2090 len -= 2;
2091 }
2092
2093 discard_cleanups (cleanup);
2094
2095 *pdata = data;
2096 *psize = size;
2097 }
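
/* For example (illustrative), the body text "0a1b" decodes into the two
   bytes 0x0a and 0x1b; an odd-length body such as "0a1" is rejected with
   "Bad raw data size.". */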
2098
2099 /* Parse a btrace pt-config "cpu" xml record. */
2100
2101 static void
2102 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2103 const struct gdb_xml_element *element,
2104 void *user_data,
2105 VEC (gdb_xml_value_s) *attributes)
2106 {
2107 struct btrace_data *btrace;
2108 const char *vendor;
2109 ULONGEST *family, *model, *stepping;
2110
2111 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2112 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2113 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2114 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2115
2116 btrace = (struct btrace_data *) user_data;
2117
2118 if (strcmp (vendor, "GenuineIntel") == 0)
2119 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2120
2121 btrace->variant.pt.config.cpu.family = *family;
2122 btrace->variant.pt.config.cpu.model = *model;
2123 btrace->variant.pt.config.cpu.stepping = *stepping;
2124 }
2125
2126 /* Parse a btrace pt "raw" xml record. */
2127
2128 static void
2129 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2130 const struct gdb_xml_element *element,
2131 void *user_data, const char *body_text)
2132 {
2133 struct btrace_data *btrace;
2134
2135 btrace = (struct btrace_data *) user_data;
2136 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2137 &btrace->variant.pt.size);
2138 }
2139
2140 /* Parse a btrace "pt" xml record. */
2141
2142 static void
2143 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2144 const struct gdb_xml_element *element,
2145 void *user_data, VEC (gdb_xml_value_s) *attributes)
2146 {
2147 struct btrace_data *btrace;
2148
2149 btrace = (struct btrace_data *) user_data;
2150 btrace->format = BTRACE_FORMAT_PT;
2151 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2152 btrace->variant.pt.data = NULL;
2153 btrace->variant.pt.size = 0;
2154 }
2155
2156 static const struct gdb_xml_attribute block_attributes[] = {
2157 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2158 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2159 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2160 };
2161
2162 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2163 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2164 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2165 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2166 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2167 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2168 };
2169
2170 static const struct gdb_xml_element btrace_pt_config_children[] = {
2171 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2172 parse_xml_btrace_pt_config_cpu, NULL },
2173 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2174 };
2175
2176 static const struct gdb_xml_element btrace_pt_children[] = {
2177 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2178 NULL },
2179 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2180 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2181 };
2182
2183 static const struct gdb_xml_attribute btrace_attributes[] = {
2184 { "version", GDB_XML_AF_NONE, NULL, NULL },
2185 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2186 };
2187
2188 static const struct gdb_xml_element btrace_children[] = {
2189 { "block", block_attributes, NULL,
2190 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2191 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2192 NULL },
2193 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2194 };
2195
2196 static const struct gdb_xml_element btrace_elements[] = {
2197 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2198 check_xml_btrace_version, NULL },
2199 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2200 };
2201
2202 #endif /* defined (HAVE_LIBEXPAT) */
2203
2204 /* See btrace.h. */
2205
2206 void
2207 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2208 {
2209 struct cleanup *cleanup;
2210 int errcode;
2211
2212 #if defined (HAVE_LIBEXPAT)
2213
2214 btrace->format = BTRACE_FORMAT_NONE;
2215
2216 cleanup = make_cleanup_btrace_data (btrace);
2217 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2218 buffer, btrace);
2219 if (errcode != 0)
2220 error (_("Error parsing branch trace."));
2221
2222 /* Keep parse results. */
2223 discard_cleanups (cleanup);
2224
2225 #else /* !defined (HAVE_LIBEXPAT) */
2226
2227 error (_("Cannot process branch trace. XML parsing is not supported."));
2228
2229 #endif /* !defined (HAVE_LIBEXPAT) */
2230 }
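
/* Example (illustrative sketch) of a BTS document accepted by
   parse_xml_btrace, based on the element tables above:

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400530" end="0x400548"/>
     </btrace>
*/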
2231
2232 #if defined (HAVE_LIBEXPAT)
2233
2234 /* Parse a btrace-conf "bts" xml record. */
2235
2236 static void
2237 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2238 const struct gdb_xml_element *element,
2239 void *user_data, VEC (gdb_xml_value_s) *attributes)
2240 {
2241 struct btrace_config *conf;
2242 struct gdb_xml_value *size;
2243
2244 conf = (struct btrace_config *) user_data;
2245 conf->format = BTRACE_FORMAT_BTS;
2246 conf->bts.size = 0;
2247
2248 size = xml_find_attribute (attributes, "size");
2249 if (size != NULL)
2250 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2251 }
2252
2253 /* Parse a btrace-conf "pt" xml record. */
2254
2255 static void
2256 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2257 const struct gdb_xml_element *element,
2258 void *user_data, VEC (gdb_xml_value_s) *attributes)
2259 {
2260 struct btrace_config *conf;
2261 struct gdb_xml_value *size;
2262
2263 conf = (struct btrace_config *) user_data;
2264 conf->format = BTRACE_FORMAT_PT;
2265 conf->pt.size = 0;
2266
2267 size = xml_find_attribute (attributes, "size");
2268 if (size != NULL)
2269 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2270 }
2271
2272 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2273 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2274 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2275 };
2276
2277 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2278 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2279 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2280 };
2281
2282 static const struct gdb_xml_element btrace_conf_children[] = {
2283 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2284 parse_xml_btrace_conf_bts, NULL },
2285 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2286 parse_xml_btrace_conf_pt, NULL },
2287 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2288 };
2289
2290 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2291 { "version", GDB_XML_AF_NONE, NULL, NULL },
2292 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2293 };
2294
2295 static const struct gdb_xml_element btrace_conf_elements[] = {
2296 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2297 GDB_XML_EF_NONE, NULL, NULL },
2298 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2299 };
2300
2301 #endif /* defined (HAVE_LIBEXPAT) */
2302
2303 /* See btrace.h. */
2304
2305 void
2306 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2307 {
2308 int errcode;
2309
2310 #if defined (HAVE_LIBEXPAT)
2311
2312 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2313 btrace_conf_elements, xml, conf);
2314 if (errcode != 0)
2315 error (_("Error parsing branch trace configuration."));
2316
2317 #else /* !defined (HAVE_LIBEXPAT) */
2318
2319 error (_("XML parsing is not supported."));
2320
2321 #endif /* !defined (HAVE_LIBEXPAT) */
2322 }
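
/* Example (illustrative sketch) of a document accepted by
   parse_xml_btrace_conf, based on the element tables above:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>
*/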
2323
2324 /* See btrace.h. */
2325
2326 const struct btrace_insn *
2327 btrace_insn_get (const struct btrace_insn_iterator *it)
2328 {
2329 const struct btrace_function *bfun;
2330 unsigned int index, end;
2331
2332 index = it->insn_index;
2333 bfun = &it->btinfo->functions[it->call_index];
2334
2335 /* Check if the iterator points to a gap in the trace. */
2336 if (bfun->errcode != 0)
2337 return NULL;
2338
2339 /* The index is within the bounds of this function's instruction vector. */
2340 end = VEC_length (btrace_insn_s, bfun->insn);
2341 gdb_assert (0 < end);
2342 gdb_assert (index < end);
2343
2344 return VEC_index (btrace_insn_s, bfun->insn, index);
2345 }
2346
2347 /* See btrace.h. */
2348
2349 int
2350 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2351 {
2352 return it->btinfo->functions[it->call_index].errcode;
2353 }
2354
2355 /* See btrace.h. */
2356
2357 unsigned int
2358 btrace_insn_number (const struct btrace_insn_iterator *it)
2359 {
2360 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
2361 }
2362
2363 /* See btrace.h. */
2364
2365 void
2366 btrace_insn_begin (struct btrace_insn_iterator *it,
2367 const struct btrace_thread_info *btinfo)
2368 {
2369 if (btinfo->functions.empty ())
2370 error (_("No trace."));
2371
2372 it->btinfo = btinfo;
2373 it->call_index = 0;
2374 it->insn_index = 0;
2375 }
2376
2377 /* See btrace.h. */
2378
2379 void
2380 btrace_insn_end (struct btrace_insn_iterator *it,
2381 const struct btrace_thread_info *btinfo)
2382 {
2383 const struct btrace_function *bfun;
2384 unsigned int length;
2385
2386 if (btinfo->functions.empty ())
2387 error (_("No trace."));
2388
2389 bfun = &btinfo->functions.back ();
2390 length = VEC_length (btrace_insn_s, bfun->insn);
2391
2392 /* The last function may either be a gap or contain the current
2393 instruction, which is one past the end of the execution trace; ignore
2394 it. */
2395 if (length > 0)
2396 length -= 1;
2397
2398 it->btinfo = btinfo;
2399 it->call_index = bfun->number - 1;
2400 it->insn_index = length;
2401 }
2402
2403 /* See btrace.h. */
2404
2405 unsigned int
2406 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2407 {
2408 const struct btrace_function *bfun;
2409 unsigned int index, steps;
2410
2411 bfun = &it->btinfo->functions[it->call_index];
2412 steps = 0;
2413 index = it->insn_index;
2414
2415 while (stride != 0)
2416 {
2417 unsigned int end, space, adv;
2418
2419 end = VEC_length (btrace_insn_s, bfun->insn);
2420
2421 /* An empty function segment represents a gap in the trace. We count
2422 it as one instruction. */
2423 if (end == 0)
2424 {
2425 const struct btrace_function *next;
2426
2427 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2428 if (next == NULL)
2429 break;
2430
2431 stride -= 1;
2432 steps += 1;
2433
2434 bfun = next;
2435 index = 0;
2436
2437 continue;
2438 }
2439
2440 gdb_assert (0 < end);
2441 gdb_assert (index < end);
2442
2443 /* Compute the number of instructions remaining in this segment. */
2444 space = end - index;
2445
2446 /* Advance the iterator as far as possible within this segment. */
2447 adv = std::min (space, stride);
2448 stride -= adv;
2449 index += adv;
2450 steps += adv;
2451
2452 /* Move to the next function if we're at the end of this one. */
2453 if (index == end)
2454 {
2455 const struct btrace_function *next;
2456
2457 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2458 if (next == NULL)
2459 {
2460 /* We stepped past the last function.
2461
2462 Let's adjust the index to point to the last instruction in
2463 the previous function. */
2464 index -= 1;
2465 steps -= 1;
2466 break;
2467 }
2468
2469 /* We now point to the first instruction in the new function. */
2470 bfun = next;
2471 index = 0;
2472 }
2473
2474 /* We did make progress. */
2475 gdb_assert (adv > 0);
2476 }
2477
2478 /* Update the iterator. */
2479 it->call_index = bfun->number - 1;
2480 it->insn_index = index;
2481
2482 return steps;
2483 }
2484
2485 /* See btrace.h. */
2486
2487 unsigned int
2488 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2489 {
2490 const struct btrace_function *bfun;
2491 unsigned int index, steps;
2492
2493 bfun = &it->btinfo->functions[it->call_index];
2494 steps = 0;
2495 index = it->insn_index;
2496
2497 while (stride != 0)
2498 {
2499 unsigned int adv;
2500
2501 /* Move to the previous function if we're at the start of this one. */
2502 if (index == 0)
2503 {
2504 const struct btrace_function *prev;
2505
2506 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2507 if (prev == NULL)
2508 break;
2509
2510 /* We point to one after the last instruction in the new function. */
2511 bfun = prev;
2512 index = VEC_length (btrace_insn_s, bfun->insn);
2513
2514 /* An empty function segment represents a gap in the trace. We count
2515 it as one instruction. */
2516 if (index == 0)
2517 {
2518 stride -= 1;
2519 steps += 1;
2520
2521 continue;
2522 }
2523 }
2524
2525 /* Advance the iterator as far as possible within this segment. */
2526 adv = std::min (index, stride);
2527
2528 stride -= adv;
2529 index -= adv;
2530 steps += adv;
2531
2532 /* We did make progress. */
2533 gdb_assert (adv > 0);
2534 }
2535
2536 /* Update the iterator. */
2537 it->call_index = bfun->number - 1;
2538 it->insn_index = index;
2539
2540 return steps;
2541 }
2542
2543 /* See btrace.h. */
2544
2545 int
2546 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2547 const struct btrace_insn_iterator *rhs)
2548 {
2549 gdb_assert (lhs->btinfo == rhs->btinfo);
2550
2551 if (lhs->call_index != rhs->call_index)
2552 return lhs->call_index - rhs->call_index;
2553
2554 return lhs->insn_index - rhs->insn_index;
2555 }
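
/* Example (illustrative sketch): walking the recorded instructions from
   begin to end, one at a time, skipping gaps.  PROCESS_PC is a
   hypothetical callback:

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           process_pc (insn->pc);

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }
*/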
2556
2557 /* See btrace.h. */
2558
2559 int
2560 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2561 const struct btrace_thread_info *btinfo,
2562 unsigned int number)
2563 {
2564 const struct btrace_function *bfun;
2565 unsigned int upper, lower;
2566
2567 if (btinfo->functions.empty ())
2568 return 0;
2569
2570 lower = 0;
2571 bfun = &btinfo->functions[lower];
2572 if (number < bfun->insn_offset)
2573 return 0;
2574
2575 upper = btinfo->functions.size () - 1;
2576 bfun = &btinfo->functions[upper];
2577 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2578 return 0;
2579
2580 /* We assume that there are no holes in the numbering. */
2581 for (;;)
2582 {
2583 const unsigned int average = lower + (upper - lower) / 2;
2584
2585 bfun = &btinfo->functions[average];
2586
2587 if (number < bfun->insn_offset)
2588 {
2589 upper = average - 1;
2590 continue;
2591 }
2592
2593 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2594 {
2595 lower = average + 1;
2596 continue;
2597 }
2598
2599 break;
2600 }
2601
2602 it->btinfo = btinfo;
2603 it->call_index = bfun->number - 1;
2604 it->insn_index = number - bfun->insn_offset;
2605 return 1;
2606 }
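
/* Example (illustrative sketch): jumping directly to instruction number
   NUMBER, as printed by "record instruction-history":

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("Instruction %u is out of range."), number);
*/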
2607
2608 /* Returns true if the recording ends with a function segment that
2609 contains only a single (i.e. the current) instruction. */
2610
2611 static bool
2612 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2613 {
2614 const btrace_function *bfun;
2615
2616 if (btinfo->functions.empty ())
2617 return false;
2618
2619 bfun = &btinfo->functions.back ();
2620 if (bfun->errcode != 0)
2621 return false;
2622
2623 return ftrace_call_num_insn (bfun) == 1;
2624 }
2625
2626 /* See btrace.h. */
2627
2628 const struct btrace_function *
2629 btrace_call_get (const struct btrace_call_iterator *it)
2630 {
2631 if (it->index >= it->btinfo->functions.size ())
2632 return NULL;
2633
2634 return &it->btinfo->functions[it->index];
2635 }
2636
2637 /* See btrace.h. */
2638
2639 unsigned int
2640 btrace_call_number (const struct btrace_call_iterator *it)
2641 {
2642 const unsigned int length = it->btinfo->functions.size ();
2643
2644 /* If the last function segment contains only a single instruction (i.e. the
2645 current instruction), skip it. */
2646 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2647 return length;
2648
2649 return it->index + 1;
2650 }
2651
2652 /* See btrace.h. */
2653
2654 void
2655 btrace_call_begin (struct btrace_call_iterator *it,
2656 const struct btrace_thread_info *btinfo)
2657 {
2658 if (btinfo->functions.empty ())
2659 error (_("No trace."));
2660
2661 it->btinfo = btinfo;
2662 it->index = 0;
2663 }
2664
2665 /* See btrace.h. */
2666
2667 void
2668 btrace_call_end (struct btrace_call_iterator *it,
2669 const struct btrace_thread_info *btinfo)
2670 {
2671 if (btinfo->functions.empty ())
2672 error (_("No trace."));
2673
2674 it->btinfo = btinfo;
2675 it->index = btinfo->functions.size ();
2676 }
2677
2678 /* See btrace.h. */
2679
2680 unsigned int
2681 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2682 {
2683 const unsigned int length = it->btinfo->functions.size ();
2684
2685 if (it->index + stride < length - 1)
2686 /* Default case: Simply advance the iterator. */
2687 it->index += stride;
2688 else if (it->index + stride == length - 1)
2689 {
2690 /* We land exactly at the last function segment. If it contains only one
2691 instruction (i.e. the current instruction), it is not actually part of
2692 the trace. */
2693 if (btrace_ends_with_single_insn (it->btinfo))
2694 it->index = length;
2695 else
2696 it->index = length - 1;
2697 }
2698 else
2699 {
2700 /* We land past the last function segment and have to adjust the stride.
2701 If the last function segment contains only one instruction (i.e. the
2702 current instruction), it is not actually part of the trace. */
2703 if (btrace_ends_with_single_insn (it->btinfo))
2704 stride = length - it->index - 1;
2705 else
2706 stride = length - it->index;
2707
2708 it->index = length;
2709 }
2710
2711 return stride;
2712 }
2713
2714 /* See btrace.h. */
2715
2716 unsigned int
2717 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2718 {
2719 const unsigned int length = it->btinfo->functions.size ();
2720 int steps = 0;
2721
2722 gdb_assert (it->index <= length);
2723
2724 if (stride == 0 || it->index == 0)
2725 return 0;
2726
2727 /* If we are at the end, the first step is a special case. If the last
2728 function segment contains only one instruction (i.e. the current
2729 instruction), it is not actually part of the trace. To be able to step
2730 over this instruction, we need at least one more function segment. */
2731 if ((it->index == length) && (length > 1))
2732 {
2733 if (btrace_ends_with_single_insn (it->btinfo))
2734 it->index = length - 2;
2735 else
2736 it->index = length - 1;
2737
2738 steps = 1;
2739 stride -= 1;
2740 }
2741
2742 stride = std::min (stride, it->index);
2743
2744 it->index -= stride;
2745 return steps + stride;
2746 }
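
/* Example (illustrative sketch): printing the function name of every
   recorded call segment, oldest first:

     struct btrace_call_iterator it;

     btrace_call_begin (&it, btinfo);
     while (btrace_call_get (&it) != NULL)
       {
         printf_unfiltered ("%s\n",
                            ftrace_print_function_name (btrace_call_get (&it)));

         if (btrace_call_next (&it, 1) == 0)
           break;
       }
*/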
2747
2748 /* See btrace.h. */
2749
2750 int
2751 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2752 const struct btrace_call_iterator *rhs)
2753 {
2754 gdb_assert (lhs->btinfo == rhs->btinfo);
2755 return (int) (lhs->index - rhs->index);
2756 }
2757
2758 /* See btrace.h. */
2759
2760 int
2761 btrace_find_call_by_number (struct btrace_call_iterator *it,
2762 const struct btrace_thread_info *btinfo,
2763 unsigned int number)
2764 {
2765 const unsigned int length = btinfo->functions.size ();
2766
2767 if ((number == 0) || (number > length))
2768 return 0;
2769
2770 it->btinfo = btinfo;
2771 it->index = number - 1;
2772 return 1;
2773 }
2774
2775 /* See btrace.h. */
2776
2777 void
2778 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2779 const struct btrace_insn_iterator *begin,
2780 const struct btrace_insn_iterator *end)
2781 {
2782 if (btinfo->insn_history == NULL)
2783 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2784
2785 btinfo->insn_history->begin = *begin;
2786 btinfo->insn_history->end = *end;
2787 }
2788
2789 /* See btrace.h. */
2790
2791 void
2792 btrace_set_call_history (struct btrace_thread_info *btinfo,
2793 const struct btrace_call_iterator *begin,
2794 const struct btrace_call_iterator *end)
2795 {
2796 gdb_assert (begin->btinfo == end->btinfo);
2797
2798 if (btinfo->call_history == NULL)
2799 btinfo->call_history = XCNEW (struct btrace_call_history);
2800
2801 btinfo->call_history->begin = *begin;
2802 btinfo->call_history->end = *end;
2803 }
2804
2805 /* See btrace.h. */
2806
2807 int
2808 btrace_is_replaying (struct thread_info *tp)
2809 {
2810 return tp->btrace.replay != NULL;
2811 }
2812
2813 /* See btrace.h. */
2814
2815 int
2816 btrace_is_empty (struct thread_info *tp)
2817 {
2818 struct btrace_insn_iterator begin, end;
2819 struct btrace_thread_info *btinfo;
2820
2821 btinfo = &tp->btrace;
2822
2823 if (btinfo->functions.empty ())
2824 return 1;
2825
2826 btrace_insn_begin (&begin, btinfo);
2827 btrace_insn_end (&end, btinfo);
2828
2829 return btrace_insn_cmp (&begin, &end) == 0;
2830 }
2831
2832 /* Forward the cleanup request. */
2833
2834 static void
2835 do_btrace_data_cleanup (void *arg)
2836 {
2837 btrace_data_fini ((struct btrace_data *) arg);
2838 }
2839
2840 /* See btrace.h. */
2841
2842 struct cleanup *
2843 make_cleanup_btrace_data (struct btrace_data *data)
2844 {
2845 return make_cleanup (do_btrace_data_cleanup, data);
2846 }
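
/* Example (illustrative sketch): the usual pattern, as in btrace_fetch
   above - initialize the data, register the cleanup, and run it once the
   data is no longer needed:

     struct btrace_data btrace;
     struct cleanup *cleanup;

     btrace_data_init (&btrace);
     cleanup = make_cleanup_btrace_data (&btrace);

     ... use BTRACE; any error path frees it via the cleanup ...

     do_cleanups (cleanup);
*/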
2847
2848 #if defined (HAVE_LIBIPT)
2849
2850 /* Print a single packet. */
2851
2852 static void
2853 pt_print_packet (const struct pt_packet *packet)
2854 {
2855 switch (packet->type)
2856 {
2857 default:
2858 printf_unfiltered (("[??: %x]"), packet->type);
2859 break;
2860
2861 case ppt_psb:
2862 printf_unfiltered (("psb"));
2863 break;
2864
2865 case ppt_psbend:
2866 printf_unfiltered (("psbend"));
2867 break;
2868
2869 case ppt_pad:
2870 printf_unfiltered (("pad"));
2871 break;
2872
2873 case ppt_tip:
2874 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2875 packet->payload.ip.ipc,
2876 packet->payload.ip.ip);
2877 break;
2878
2879 case ppt_tip_pge:
2880 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2881 packet->payload.ip.ipc,
2882 packet->payload.ip.ip);
2883 break;
2884
2885 case ppt_tip_pgd:
2886 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2887 packet->payload.ip.ipc,
2888 packet->payload.ip.ip);
2889 break;
2890
2891 case ppt_fup:
2892 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2893 packet->payload.ip.ipc,
2894 packet->payload.ip.ip);
2895 break;
2896
2897 case ppt_tnt_8:
2898 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2899 packet->payload.tnt.bit_size,
2900 packet->payload.tnt.payload);
2901 break;
2902
2903 case ppt_tnt_64:
2904 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2905 packet->payload.tnt.bit_size,
2906 packet->payload.tnt.payload);
2907 break;
2908
2909 case ppt_pip:
2910 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2911 packet->payload.pip.nr ? (" nr") : (""));
2912 break;
2913
2914 case ppt_tsc:
2915 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2916 break;
2917
2918 case ppt_cbr:
2919 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2920 break;
2921
2922 case ppt_mode:
2923 switch (packet->payload.mode.leaf)
2924 {
2925 default:
2926 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2927 break;
2928
2929 case pt_mol_exec:
2930 printf_unfiltered (("mode.exec%s%s"),
2931 packet->payload.mode.bits.exec.csl
2932 ? (" cs.l") : (""),
2933 packet->payload.mode.bits.exec.csd
2934 ? (" cs.d") : (""));
2935 break;
2936
2937 case pt_mol_tsx:
2938 printf_unfiltered (("mode.tsx%s%s"),
2939 packet->payload.mode.bits.tsx.intx
2940 ? (" intx") : (""),
2941 packet->payload.mode.bits.tsx.abrt
2942 ? (" abrt") : (""));
2943 break;
2944 }
2945 break;
2946
2947 case ppt_ovf:
2948 printf_unfiltered (("ovf"));
2949 break;
2950
2951 case ppt_stop:
2952 printf_unfiltered (("stop"));
2953 break;
2954
2955 case ppt_vmcs:
2956 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2957 break;
2958
2959 case ppt_tma:
2960 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2961 packet->payload.tma.fc);
2962 break;
2963
2964 case ppt_mtc:
2965 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2966 break;
2967
2968 case ppt_cyc:
2969 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2970 break;
2971
2972 case ppt_mnt:
2973 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2974 break;
2975 }
2976 }
2977
2978 /* Decode packets into MAINT using DECODER. */
2979
2980 static void
2981 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2982 struct pt_packet_decoder *decoder)
2983 {
2984 int errcode;
2985
2986 for (;;)
2987 {
2988 struct btrace_pt_packet packet;
2989
2990 errcode = pt_pkt_sync_forward (decoder);
2991 if (errcode < 0)
2992 break;
2993
2994 for (;;)
2995 {
2996 pt_pkt_get_offset (decoder, &packet.offset);
2997
2998 errcode = pt_pkt_next (decoder, &packet.packet,
2999 sizeof (packet.packet));
3000 if (errcode < 0)
3001 break;
3002
3003 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
3004 {
3005 packet.errcode = pt_errcode (errcode);
3006 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
3007 &packet);
3008 }
3009 }
3010
3011 if (errcode == -pte_eos)
3012 break;
3013
3014 packet.errcode = pt_errcode (errcode);
3015 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
3016 &packet);
3017
3018 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
3019 packet.offset, pt_errstr (packet.errcode));
3020 }
3021
3022 if (errcode != -pte_eos)
3023 warning (_("Failed to synchronize onto the Intel Processor Trace "
3024 "stream: %s."), pt_errstr (pt_errcode (errcode)));
3025 }
3026
3027 /* Update the packet history in BTINFO. */
3028
3029 static void
3030 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
3031 {
3033 struct pt_packet_decoder *decoder;
3034 struct btrace_data_pt *pt;
3035 struct pt_config config;
3036 int errcode;
3037
3038 pt = &btinfo->data.variant.pt;
3039
3040 /* Nothing to do if there is no trace. */
3041 if (pt->size == 0)
3042 return;
3043
3044 memset (&config, 0, sizeof (config));
3045
3046 config.size = sizeof (config);
3047 config.begin = pt->data;
3048 config.end = pt->data + pt->size;
3049
3050 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
3051 config.cpu.family = pt->config.cpu.family;
3052 config.cpu.model = pt->config.cpu.model;
3053 config.cpu.stepping = pt->config.cpu.stepping;
3054
3055 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3056 if (errcode < 0)
3057 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
3058 pt_errstr (pt_errcode (errcode)));
3059
3060 decoder = pt_pkt_alloc_decoder (&config);
3061 if (decoder == NULL)
3062 error (_("Failed to allocate the Intel Processor Trace decoder."));
3063
3064 TRY
3065 {
3066 btrace_maint_decode_pt (&btinfo->maint, decoder);
3067 }
3068 CATCH (except, RETURN_MASK_ALL)
3069 {
3070 pt_pkt_free_decoder (decoder);
3071
3072 if (except.reason < 0)
3073 throw_exception (except);
3074 }
3075 END_CATCH
3076
3077 pt_pkt_free_decoder (decoder);
3078 }
3079
3080 #endif /* defined (HAVE_LIBIPT) */
3081
3082 /* Update the packet maintenance information for BTINFO and store the
3083 low and high bounds into BEGIN and END, respectively.
3084 Store the current iterator state into FROM and TO. */
3085
3086 static void
3087 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3088 unsigned int *begin, unsigned int *end,
3089 unsigned int *from, unsigned int *to)
3090 {
3091 switch (btinfo->data.format)
3092 {
3093 default:
3094 *begin = 0;
3095 *end = 0;
3096 *from = 0;
3097 *to = 0;
3098 break;
3099
3100 case BTRACE_FORMAT_BTS:
3101 /* Nothing to do - we operate directly on BTINFO->DATA. */
3102 *begin = 0;
3103 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3104 *from = btinfo->maint.variant.bts.packet_history.begin;
3105 *to = btinfo->maint.variant.bts.packet_history.end;
3106 break;
3107
3108 #if defined (HAVE_LIBIPT)
3109 case BTRACE_FORMAT_PT:
3110 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3111 btrace_maint_update_pt_packets (btinfo);
3112
3113 *begin = 0;
3114 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3115 *from = btinfo->maint.variant.pt.packet_history.begin;
3116 *to = btinfo->maint.variant.pt.packet_history.end;
3117 break;
3118 #endif /* defined (HAVE_LIBIPT) */
3119 }
3120 }
3121
3122 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3123 update the current iterator position. */
3124
3125 static void
3126 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3127 unsigned int begin, unsigned int end)
3128 {
3129 switch (btinfo->data.format)
3130 {
3131 default:
3132 break;
3133
3134 case BTRACE_FORMAT_BTS:
3135 {
3136 VEC (btrace_block_s) *blocks;
3137 unsigned int blk;
3138
3139 blocks = btinfo->data.variant.bts.blocks;
3140 for (blk = begin; blk < end; ++blk)
3141 {
3142 const btrace_block_s *block;
3143
3144 block = VEC_index (btrace_block_s, blocks, blk);
3145
3146 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3147 core_addr_to_string_nz (block->begin),
3148 core_addr_to_string_nz (block->end));
3149 }
3150
3151 btinfo->maint.variant.bts.packet_history.begin = begin;
3152 btinfo->maint.variant.bts.packet_history.end = end;
3153 }
3154 break;
3155
3156 #if defined (HAVE_LIBIPT)
3157 case BTRACE_FORMAT_PT:
3158 {
3159 VEC (btrace_pt_packet_s) *packets;
3160 unsigned int pkt;
3161
3162 packets = btinfo->maint.variant.pt.packets;
3163 for (pkt = begin; pkt < end; ++pkt)
3164 {
3165 const struct btrace_pt_packet *packet;
3166
3167 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3168
3169 printf_unfiltered ("%u\t", pkt);
3170 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3171
3172 if (packet->errcode == pte_ok)
3173 pt_print_packet (&packet->packet);
3174 else
3175 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3176
3177 printf_unfiltered ("\n");
3178 }
3179
3180 btinfo->maint.variant.pt.packet_history.begin = begin;
3181 btinfo->maint.variant.pt.packet_history.end = end;
3182 }
3183 break;
3184 #endif /* defined (HAVE_LIBIPT) */
3185 }
3186 }
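
/* Example (illustrative): for BTS, the function above prints one block
   per line, e.g.

     0	begin: 0x400500, end: 0x400520
     1	begin: 0x400530, end: 0x400548
*/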
3187
3188 /* Read a number from an argument string. */
3189
3190 static unsigned int
3191 get_uint (char **arg)
3192 {
3193 char *begin, *end, *pos;
3194 unsigned long number;
3195
3196 begin = *arg;
3197 pos = skip_spaces (begin);
3198
3199 if (!isdigit (*pos))
3200 error (_("Expected positive number, got: %s."), pos);
3201
3202 number = strtoul (pos, &end, 10);
3203 if (number > UINT_MAX)
3204 error (_("Number too big."));
3205
3206 *arg += (end - begin);
3207
3208 return (unsigned int) number;
3209 }
3210
3211 /* Read a context size from an argument string. */
3212
3213 static int
3214 get_context_size (char **arg)
3215 {
3216 char *pos;
3218
3219 pos = skip_spaces (*arg);
3220
3221 if (!isdigit (*pos))
3222 error (_("Expected positive number, got: %s."), pos);
3223
3224 return strtol (pos, arg, 10);
3225 }
3226
3227 /* Complain about junk at the end of an argument string. */
3228
3229 static void
3230 no_chunk (char *arg)
3231 {
3232 if (*arg != 0)
3233 error (_("Junk after argument: %s."), arg);
3234 }
3235
3236 /* The "maintenance btrace packet-history" command. */
3237
3238 static void
3239 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3240 {
3241 struct btrace_thread_info *btinfo;
3242 struct thread_info *tp;
3243 unsigned int size, begin, end, from, to;
3244
3245 tp = find_thread_ptid (inferior_ptid);
3246 if (tp == NULL)
3247 error (_("No thread."));
3248
3249 size = 10;
3250 btinfo = &tp->btrace;
3251
3252 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3253 if (begin == end)
3254 {
3255 printf_unfiltered (_("No trace.\n"));
3256 return;
3257 }
3258
3259 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3260 {
3261 from = to;
3262
3263 if (end - from < size)
3264 size = end - from;
3265 to = from + size;
3266 }
3267 else if (strcmp (arg, "-") == 0)
3268 {
3269 to = from;
3270
3271 if (to - begin < size)
3272 size = to - begin;
3273 from = to - size;
3274 }
3275 else
3276 {
3277 from = get_uint (&arg);
3278 if (end <= from)
3279 error (_("'%u' is out of range."), from);
3280
3281 arg = skip_spaces (arg);
3282 if (*arg == ',')
3283 {
3284 arg = skip_spaces (++arg);
3285
3286 if (*arg == '+')
3287 {
3288 arg += 1;
3289 size = get_context_size (&arg);
3290
3291 no_chunk (arg);
3292
3293 if (end - from < size)
3294 size = end - from;
3295 to = from + size;
3296 }
3297 else if (*arg == '-')
3298 {
3299 arg += 1;
3300 size = get_context_size (&arg);
3301
3302 no_chunk (arg);
3303
3304 /* Include the packet given as first argument. */
3305 from += 1;
3306 to = from;
3307
3308 if (to - begin < size)
3309 size = to - begin;
3310 from = to - size;
3311 }
3312 else
3313 {
3314 to = get_uint (&arg);
3315
3316 /* Include the packet at the second argument and silently
3317 truncate the range. */
3318 if (to < end)
3319 to += 1;
3320 else
3321 to = end;
3322
3323 no_chunk (arg);
3324 }
3325 }
3326 else
3327 {
3328 no_chunk (arg);
3329
3330 if (end - from < size)
3331 size = end - from;
3332 to = from + size;
3333 }
3334
3335 dont_repeat ();
3336 }
3337
3338 btrace_maint_print_packets (btinfo, from, to);
3339 }
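
/* Examples (illustrative) of arguments accepted by the command above:

     maint btrace packet-history          the next ten packets
     maint btrace packet-history -        the previous ten packets
     maint btrace packet-history 42       ten packets starting at 42
     maint btrace packet-history 42,50    packets 42 up to and including 50
     maint btrace packet-history 42,+5    five packets starting at 42
     maint btrace packet-history 42,-5    five packets up to and including 42
*/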
3340
3341 /* The "maintenance btrace clear-packet-history" command. */
3342
3343 static void
3344 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3345 {
3346 struct btrace_thread_info *btinfo;
3347 struct thread_info *tp;
3348
3349 if (args != NULL && *args != 0)
3350 error (_("Invalid argument."));
3351
3352 tp = find_thread_ptid (inferior_ptid);
3353 if (tp == NULL)
3354 error (_("No thread."));
3355
3356 btinfo = &tp->btrace;
3357
3358 /* Clear the maint data first - it depends on BTINFO->DATA. */
3359 btrace_maint_clear (btinfo);
3360 btrace_data_clear (&btinfo->data);
3361 }
3362
3363 /* The "maintenance btrace clear" command. */
3364
3365 static void
3366 maint_btrace_clear_cmd (char *args, int from_tty)
3367 {
3368 struct btrace_thread_info *btinfo;
3369 struct thread_info *tp;
3370
3371 if (args != NULL && *args != 0)
3372 error (_("Invalid argument."));
3373
3374 tp = find_thread_ptid (inferior_ptid);
3375 if (tp == NULL)
3376 error (_("No thread."));
3377
3378 btrace_clear (tp);
3379 }
3380
3381 /* The "maintenance btrace" command. */
3382
3383 static void
3384 maint_btrace_cmd (char *args, int from_tty)
3385 {
3386 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3387 gdb_stdout);
3388 }
3389
3390 /* The "maintenance set btrace" command. */
3391
3392 static void
3393 maint_btrace_set_cmd (char *args, int from_tty)
3394 {
3395 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3396 gdb_stdout);
3397 }
3398
3399 /* The "maintenance show btrace" command. */
3400
3401 static void
3402 maint_btrace_show_cmd (char *args, int from_tty)
3403 {
3404 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3405 all_commands, gdb_stdout);
3406 }
3407
3408 /* The "maintenance set btrace pt" command. */
3409
3410 static void
3411 maint_btrace_pt_set_cmd (char *args, int from_tty)
3412 {
3413 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3414 all_commands, gdb_stdout);
3415 }
3416
3417 /* The "maintenance show btrace pt" command. */
3418
3419 static void
3420 maint_btrace_pt_show_cmd (char *args, int from_tty)
3421 {
3422 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3423 all_commands, gdb_stdout);
3424 }
3425
3426 /* The "maintenance info btrace" command. */
3427
3428 static void
3429 maint_info_btrace_cmd (char *args, int from_tty)
3430 {
3431 struct btrace_thread_info *btinfo;
3432 struct thread_info *tp;
3433 const struct btrace_config *conf;
3434
3435 if (args != NULL && *args != 0)
3436 error (_("Invalid argument."));
3437
3438 tp = find_thread_ptid (inferior_ptid);
3439 if (tp == NULL)
3440 error (_("No thread."));
3441
3442 btinfo = &tp->btrace;
3443
3444 conf = btrace_conf (btinfo);
3445 if (conf == NULL)
3446 error (_("No btrace configuration."));
3447
3448 printf_unfiltered (_("Format: %s.\n"),
3449 btrace_format_string (conf->format));
3450
3451 switch (conf->format)
3452 {
3453 default:
3454 break;
3455
3456 case BTRACE_FORMAT_BTS:
3457 printf_unfiltered (_("Number of packets: %u.\n"),
3458 VEC_length (btrace_block_s,
3459 btinfo->data.variant.bts.blocks));
3460 break;
3461
3462 #if defined (HAVE_LIBIPT)
3463 case BTRACE_FORMAT_PT:
3464 {
3465 struct pt_version version;
3466
3467 version = pt_library_version ();
3468 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3469 version.minor, version.build,
3470 version.ext != NULL ? version.ext : "");
3471
3472 btrace_maint_update_pt_packets (btinfo);
3473 printf_unfiltered (_("Number of packets: %u.\n"),
3474 VEC_length (btrace_pt_packet_s,
3475 btinfo->maint.variant.pt.packets));
3476 }
3477 break;
3478 #endif /* defined (HAVE_LIBIPT) */
3479 }
3480 }
3481
3482 /* The "maint show btrace pt skip-pad" show value function. */
3483
3484 static void
3485 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3486 struct cmd_list_element *c,
3487 const char *value)
3488 {
3489 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3490 }
3491
3492
3493 /* Initialize btrace maintenance commands. */
3494
3495 void _initialize_btrace (void);
3496 void
3497 _initialize_btrace (void)
3498 {
3499 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3500 _("Info about branch tracing data."), &maintenanceinfolist);
3501
3502 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3503 _("Branch tracing maintenance commands."),
3504 &maint_btrace_cmdlist, "maintenance btrace ",
3505 0, &maintenancelist);
3506
3507 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3508 Set branch tracing specific variables."),
3509 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3510 0, &maintenance_set_cmdlist);
3511
3512 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3513 Set Intel Processor Trace specific variables."),
3514 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3515 0, &maint_btrace_set_cmdlist);
3516
3517 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3518 Show branch tracing specific variables."),
3519 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3520 0, &maintenance_show_cmdlist);
3521
3522 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3523 Show Intel Processor Trace specific variables."),
3524 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3525 0, &maint_btrace_show_cmdlist);
3526
3527 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3528 &maint_btrace_pt_skip_pad, _("\
3529 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3530 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3531 When enabled, PAD packets are ignored in the btrace packet history."),
3532 NULL, show_maint_btrace_pt_skip_pad,
3533 &maint_btrace_pt_set_cmdlist,
3534 &maint_btrace_pt_show_cmdlist);
3535
3536 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3537 _("Print the raw branch tracing data.\n\
3538 With no argument, print ten more packets after the previous ten-line print.\n\
3539 With '-' as argument, print ten packets before the previous ten-line print.\n\
3540 One argument specifies the starting packet of a ten-line print.\n\
3541 Two arguments separated by a comma specify the starting and ending packets \
3542 to print.\n\
3543 Preceded by '+'/'-', the second argument specifies the distance from the \
3544 first.\n"),
3545 &maint_btrace_cmdlist);
3546
3547 add_cmd ("clear-packet-history", class_maintenance,
3548 maint_btrace_clear_packet_history_cmd,
3549 _("Clears the branch tracing packet history.\n\
3550 Discards the raw branch tracing data but not the execution history data.\n\
3551 "),
3552 &maint_btrace_cmdlist);
3553
3554 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3555 _("Clears the branch tracing data.\n\
3556 Discards the raw branch tracing data and the execution history data.\n\
3557 The next 'record' command will fetch the branch tracing data anew.\n\
3558 "),
3559 &maint_btrace_cmdlist);
3560
3561 }