1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 static void btrace_add_pc (struct thread_info *tp);
53
54 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
56
57 #define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
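
/* For example, with "set debug record 1" in effect,

DEBUG ("enable thread %s", print_thread_id (tp));

writes "[btrace] enable thread 1.1" (or similar) to gdb_stdlog. The GNU
##args extension swallows the leading comma when no variadic arguments
are given. */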
67
68 /* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71 static const char *
72 ftrace_print_function_name (const struct btrace_function *bfun)
73 {
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87 }
88
89 /* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_filename (const struct btrace_function *bfun)
94 {
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106 }
107
108 /* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111 static const char *
112 ftrace_print_insn_addr (const struct btrace_insn *insn)
113 {
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118 }
119
120 /* Print an ftrace debug status message. */
121
122 static void
123 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124 {
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138 }
139
140 /* Return the number of instructions in a given function call segment. */
141
142 static unsigned int
143 ftrace_call_num_insn (const struct btrace_function *bfun)
144 {
145 if (bfun == NULL)
146 return 0;
147
148 /* A gap is always counted as one instruction. */
149 if (bfun->errcode != 0)
150 return 1;
151
152 return VEC_length (btrace_insn_s, bfun->insn);
153 }
154
155 /* Return the function segment with the given NUMBER or NULL if no such segment
156 exists. BTINFO is the branch trace information for the current thread. */
157
158 static struct btrace_function *
159 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
160 unsigned int number)
161 {
162 if (number == 0 || number > btinfo->functions.size ())
163 return NULL;
164
165 return btinfo->functions[number - 1];
166 }
167
168 /* Return non-zero if BFUN does not match MFUN and FUN,
169 return zero otherwise. */
170
171 static int
172 ftrace_function_switched (const struct btrace_function *bfun,
173 const struct minimal_symbol *mfun,
174 const struct symbol *fun)
175 {
176 struct minimal_symbol *msym;
177 struct symbol *sym;
178
179 msym = bfun->msym;
180 sym = bfun->sym;
181
182 /* If the minimal symbol changed, we certainly switched functions. */
183 if (mfun != NULL && msym != NULL
184 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
185 return 1;
186
187 /* If the symbol changed, we certainly switched functions. */
188 if (fun != NULL && sym != NULL)
189 {
190 const char *bfname, *fname;
191
192 /* Check the function name. */
193 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
194 return 1;
195
196 /* Check the location of those functions, as well. */
197 bfname = symtab_to_fullname (symbol_symtab (sym));
198 fname = symtab_to_fullname (symbol_symtab (fun));
199 if (filename_cmp (fname, bfname) != 0)
200 return 1;
201 }
202
203 /* If we lost symbol information, we switched functions. */
204 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
205 return 1;
206
207 /* If we gained symbol information, we switched functions. */
208 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
209 return 1;
210
211 return 0;
212 }
213
214 /* Allocate and initialize a new branch trace function segment at the end of
215 the trace.
216 BTINFO is the branch trace information for the current thread.
217 MFUN and FUN are the symbol information we have for this function. */
218
219 static struct btrace_function *
220 ftrace_new_function (struct btrace_thread_info *btinfo,
221 struct minimal_symbol *mfun,
222 struct symbol *fun)
223 {
224 struct btrace_function *bfun;
225
226 bfun = XCNEW (struct btrace_function);
227
228 bfun->msym = mfun;
229 bfun->sym = fun;
230
231 if (btinfo->functions.empty ())
232 {
233 /* Start counting at one. */
234 bfun->number = 1;
235 bfun->insn_offset = 1;
236 }
237 else
238 {
239 struct btrace_function *prev = btinfo->functions.back ();
240
241 bfun->number = prev->number + 1;
242 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
243 bfun->level = prev->level;
244 }
245
246 btinfo->functions.push_back (bfun);
247 return bfun;
248 }
249
250 /* Update the UP field of a function segment. */
251
252 static void
253 ftrace_update_caller (struct btrace_function *bfun,
254 struct btrace_function *caller,
255 enum btrace_function_flag flags)
256 {
257 if (bfun->up != 0)
258 ftrace_debug (bfun, "updating caller");
259
260 bfun->up = caller->number;
261 bfun->flags = flags;
262
263 ftrace_debug (bfun, "set caller");
264 ftrace_debug (caller, "..to");
265 }
266
267 /* Fix up the caller for all segments of a function. */
268
269 static void
270 ftrace_fixup_caller (struct btrace_thread_info *btinfo,
271 struct btrace_function *bfun,
272 struct btrace_function *caller,
273 enum btrace_function_flag flags)
274 {
275 unsigned int prev, next;
276
277 prev = bfun->prev;
278 next = bfun->next;
279 ftrace_update_caller (bfun, caller, flags);
280
281 /* Update all function segments belonging to the same function. */
282 for (; prev != 0; prev = bfun->prev)
283 {
284 bfun = ftrace_find_call_by_number (btinfo, prev);
285 ftrace_update_caller (bfun, caller, flags);
286 }
287
288 for (; next != 0; next = bfun->next)
289 {
290 bfun = ftrace_find_call_by_number (btinfo, next);
291 ftrace_update_caller (bfun, caller, flags);
292 }
293 }
294
295 /* Add a new function segment for a call at the end of the trace.
296 BTINFO is the branch trace information for the current thread.
297 MFUN and FUN are the symbol information we have for this function. */
298
299 static struct btrace_function *
300 ftrace_new_call (struct btrace_thread_info *btinfo,
301 struct minimal_symbol *mfun,
302 struct symbol *fun)
303 {
304 const unsigned int length = btinfo->functions.size ();
305 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
306
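/* LENGTH was computed before the new segment was appended, so it is the
number of the chronologically preceding function segment - the caller of
the new call - or zero if the trace was empty. */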
307 bfun->up = length;
308 bfun->level += 1;
309
310 ftrace_debug (bfun, "new call");
311
312 return bfun;
313 }
314
315 /* Add a new function segment for a tail call at the end of the trace.
316 BTINFO is the branch trace information for the current thread.
317 MFUN and FUN are the symbol information we have for this function. */
318
319 static struct btrace_function *
320 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
321 struct minimal_symbol *mfun,
322 struct symbol *fun)
323 {
324 const unsigned int length = btinfo->functions.size ();
325 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
326
327 bfun->up = length;
328 bfun->level += 1;
329 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
330
331 ftrace_debug (bfun, "new tail call");
332
333 return bfun;
334 }
335
336 /* Return the caller of BFUN or NULL if there is none. This function skips
337 tail calls in the call chain. BTINFO is the branch trace information for
338 the current thread. */
339 static struct btrace_function *
340 ftrace_get_caller (struct btrace_thread_info *btinfo,
341 struct btrace_function *bfun)
342 {
343 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
344 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
345 return ftrace_find_call_by_number (btinfo, bfun->up);
346
347 return NULL;
348 }
349
350 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
351 symbol information. BTINFO is the branch trace information for the current
352 thread. */
353
354 static struct btrace_function *
355 ftrace_find_caller (struct btrace_thread_info *btinfo,
356 struct btrace_function *bfun,
357 struct minimal_symbol *mfun,
358 struct symbol *fun)
359 {
360 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
361 {
362 /* Skip functions with incompatible symbol information. */
363 if (ftrace_function_switched (bfun, mfun, fun))
364 continue;
365
366 /* This is the function segment we're looking for. */
367 break;
368 }
369
370 return bfun;
371 }
372
373 /* Find the innermost caller in the back trace of BFUN, skipping all
374 function segments that do not end with a call instruction (e.g.
375 tail calls ending with a jump). BTINFO is the branch trace information for
376 the current thread. */
377
378 static struct btrace_function *
379 ftrace_find_call (struct btrace_thread_info *btinfo,
380 struct btrace_function *bfun)
381 {
382 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
383 {
384 struct btrace_insn *last;
385
386 /* Skip gaps. */
387 if (bfun->errcode != 0)
388 continue;
389
390 last = VEC_last (btrace_insn_s, bfun->insn);
391
392 if (last->iclass == BTRACE_INSN_CALL)
393 break;
394 }
395
396 return bfun;
397 }
398
399 /* Add a continuation segment for a function into which we return at the end of
400 the trace.
401 BTINFO is the branch trace information for the current thread.
402 MFUN and FUN are the symbol information we have for this function. */
403
404 static struct btrace_function *
405 ftrace_new_return (struct btrace_thread_info *btinfo,
406 struct minimal_symbol *mfun,
407 struct symbol *fun)
408 {
409 struct btrace_function *prev = btinfo->functions.back ();
410 struct btrace_function *bfun, *caller;
411
412 bfun = ftrace_new_function (btinfo, mfun, fun);
413
414 /* It is important to start at PREV's caller. Otherwise, we might find
415 PREV itself, if PREV is a recursive function. */
416 caller = ftrace_find_call_by_number (btinfo, prev->up);
417 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
418 if (caller != NULL)
419 {
420 /* The caller of PREV is the preceding btrace function segment in this
421 function instance. */
422 gdb_assert (caller->next == 0);
423
424 caller->next = bfun->number;
425 bfun->prev = caller->number;
426
427 /* Maintain the function level. */
428 bfun->level = caller->level;
429
430 /* Maintain the call stack. */
431 bfun->up = caller->up;
432 bfun->flags = caller->flags;
433
434 ftrace_debug (bfun, "new return");
435 }
436 else
437 {
438 /* We did not find a caller. This could mean that something went
439 wrong or that the call is simply not included in the trace. */
440
441 /* Let's search for some actual call. */
442 caller = ftrace_find_call_by_number (btinfo, prev->up);
443 caller = ftrace_find_call (btinfo, caller);
444 if (caller == NULL)
445 {
446 /* There is no call in PREV's back trace. We assume that the
447 branch trace did not include it. */
448
449 /* Let's find the topmost function and add a new caller for it.
450 This should handle a series of initial tail calls. */
451 while (prev->up != 0)
452 prev = ftrace_find_call_by_number (btinfo, prev->up);
453
454 bfun->level = prev->level - 1;
455
456 /* Fix up the call stack for PREV. */
457 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
458
459 ftrace_debug (bfun, "new return - no caller");
460 }
461 else
462 {
463 /* There is a call in PREV's back trace to which we should have
464 returned but didn't. Let's start a new, separate back trace
465 from PREV's level. */
466 bfun->level = prev->level - 1;
467
468 /* We fix up the back trace for PREV but leave other function segments
469 on the same level as they are.
470 This should handle things like schedule () correctly where we're
471 switching contexts. */
472 prev->up = bfun->number;
473 prev->flags = BFUN_UP_LINKS_TO_RET;
474
475 ftrace_debug (bfun, "new return - unknown caller");
476 }
477 }
478
479 return bfun;
480 }
481
482 /* Add a new function segment for a function switch at the end of the trace.
483 BTINFO is the branch trace information for the current thread.
484 MFUN and FUN are the symbol information we have for this function. */
485
486 static struct btrace_function *
487 ftrace_new_switch (struct btrace_thread_info *btinfo,
488 struct minimal_symbol *mfun,
489 struct symbol *fun)
490 {
491 struct btrace_function *prev = btinfo->functions.back ();
492 struct btrace_function *bfun;
493
494 /* This is an unexplained function switch. We can't really be sure about the
495 call stack, so the best we can do for now is to preserve it. */
496 bfun = ftrace_new_function (btinfo, mfun, fun);
497 bfun->up = prev->up;
498 bfun->flags = prev->flags;
499
500 ftrace_debug (bfun, "new switch");
501
502 return bfun;
503 }
504
505 /* Add a new function segment for a gap in the trace due to a decode error at
506 the end of the trace.
507 BTINFO is the branch trace information for the current thread.
508 ERRCODE is the format-specific error code. */
509
510 static struct btrace_function *
511 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
512 std::vector<unsigned int> &gaps)
513 {
514 struct btrace_function *bfun;
515
516 if (btinfo->functions.empty ())
517 bfun = ftrace_new_function (btinfo, NULL, NULL);
518 else
519 {
520 /* We hijack the previous function segment if it was empty. */
521 bfun = btinfo->functions.back ();
522 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
523 bfun = ftrace_new_function (btinfo, NULL, NULL);
524 }
525
526 bfun->errcode = errcode;
527 gaps.push_back (bfun->number);
528
529 ftrace_debug (bfun, "new gap");
530
531 return bfun;
532 }
533
534 /* Update the current function segment at the end of the trace in BTINFO with
535 respect to the instruction at PC. This may create new function segments.
536 Return the chronologically latest function segment, never NULL. */
537
538 static struct btrace_function *
539 ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
540 {
541 struct bound_minimal_symbol bmfun;
542 struct minimal_symbol *mfun;
543 struct symbol *fun;
544 struct btrace_insn *last;
545 struct btrace_function *bfun;
546
547 /* Try to determine the function we're in. We use both types of symbols
548 to avoid surprises when we sometimes get a full symbol and sometimes
549 only a minimal symbol. */
550 fun = find_pc_function (pc);
551 bmfun = lookup_minimal_symbol_by_pc (pc);
552 mfun = bmfun.minsym;
553
554 if (fun == NULL && mfun == NULL)
555 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
556
557 /* If we didn't have a function, we create one. */
558 if (btinfo->functions.empty ())
559 return ftrace_new_function (btinfo, mfun, fun);
560
561 /* If we had a gap before, we create a function. */
562 bfun = btinfo->functions.back ();
563 if (bfun->errcode != 0)
564 return ftrace_new_function (btinfo, mfun, fun);
565
566 /* Check the last instruction, if we have one.
567 We do this check first, since it allows us to fill in the call stack
568 links in addition to the normal flow links. */
569 last = NULL;
570 if (!VEC_empty (btrace_insn_s, bfun->insn))
571 last = VEC_last (btrace_insn_s, bfun->insn);
572
573 if (last != NULL)
574 {
575 switch (last->iclass)
576 {
577 case BTRACE_INSN_RETURN:
578 {
579 const char *fname;
580
581 /* On some systems, _dl_runtime_resolve returns to the resolved
582 function instead of jumping to it. From our perspective,
583 however, this is a tailcall.
584 If we treated it as return, we wouldn't be able to find the
585 resolved function in our stack back trace. Hence, we would
586 lose the current stack back trace and start anew with an empty
587 back trace. When the resolved function returns, we would then
588 create a stack back trace with the same function names but
589 different frame id's. This will confuse stepping. */
590 fname = ftrace_print_function_name (bfun);
591 if (strcmp (fname, "_dl_runtime_resolve") == 0)
592 return ftrace_new_tailcall (btinfo, mfun, fun);
593
594 return ftrace_new_return (btinfo, mfun, fun);
595 }
596
597 case BTRACE_INSN_CALL:
598 /* Ignore calls to the next instruction. They are used for PIC. */
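/* E.g. on 32-bit x86, "call 1f; 1: pop %ebx" loads the current PC
into a register. */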
599 if (last->pc + last->size == pc)
600 break;
601
602 return ftrace_new_call (btinfo, mfun, fun);
603
604 case BTRACE_INSN_JUMP:
605 {
606 CORE_ADDR start;
607
608 start = get_pc_function_start (pc);
609
610 /* A jump to the start of a function is (typically) a tail call. */
611 if (start == pc)
612 return ftrace_new_tailcall (btinfo, mfun, fun);
613
614 /* If we can't determine the function for PC, we treat a jump at
615 the end of the block as a tail call if we're switching functions
616 and as an intra-function branch if we're not. */
617 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
618 return ftrace_new_tailcall (btinfo, mfun, fun);
619
620 break;
621 }
622 }
623 }
624
625 /* Check if we're switching functions for some other reason. */
626 if (ftrace_function_switched (bfun, mfun, fun))
627 {
628 DEBUG_FTRACE ("switching from %s in %s at %s",
629 ftrace_print_insn_addr (last),
630 ftrace_print_function_name (bfun),
631 ftrace_print_filename (bfun));
632
633 return ftrace_new_switch (btinfo, mfun, fun);
634 }
635
636 return bfun;
637 }
638
639 /* Add INSN to BFUN's instructions. */
640
641 static void
642 ftrace_update_insns (struct btrace_function *bfun,
643 const struct btrace_insn *insn)
644 {
645 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
646
647 if (record_debug > 1)
648 ftrace_debug (bfun, "update insn");
649 }
650
651 /* Classify the instruction at PC. */
652
653 static enum btrace_insn_class
654 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
655 {
656 enum btrace_insn_class iclass;
657
658 iclass = BTRACE_INSN_OTHER;
659 TRY
660 {
661 if (gdbarch_insn_is_call (gdbarch, pc))
662 iclass = BTRACE_INSN_CALL;
663 else if (gdbarch_insn_is_ret (gdbarch, pc))
664 iclass = BTRACE_INSN_RETURN;
665 else if (gdbarch_insn_is_jump (gdbarch, pc))
666 iclass = BTRACE_INSN_JUMP;
667 }
668 CATCH (error, RETURN_MASK_ERROR)
669 {
670 }
671 END_CATCH
672
673 return iclass;
674 }
675
676 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
677 number of matching function segments or zero if the back traces do not
678 match. BTINFO is the branch trace information for the current thread. */
679
680 static int
681 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
682 struct btrace_function *lhs,
683 struct btrace_function *rhs)
684 {
685 int matches;
686
687 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
688 {
689 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
690 return 0;
691
692 lhs = ftrace_get_caller (btinfo, lhs);
693 rhs = ftrace_get_caller (btinfo, rhs);
694 }
695
696 return matches;
697 }
698
699 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
700 BTINFO is the branch trace information for the current thread. */
701
702 static void
703 ftrace_fixup_level (struct btrace_thread_info *btinfo,
704 struct btrace_function *bfun, int adjustment)
705 {
706 if (adjustment == 0)
707 return;
708
709 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
710 ftrace_debug (bfun, "..bfun");
711
712 while (bfun != NULL)
713 {
714 bfun->level += adjustment;
715 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
716 }
717 }
718
719 /* Recompute the global level offset. Traverse the function trace and compute
720 the global level offset as the negative of the minimal function level. */
721
722 static void
723 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
724 {
725 int level = INT_MAX;
726
727 if (btinfo == NULL)
728 return;
729
730 if (btinfo->functions.empty ())
731 return;
732
733 unsigned int length = btinfo->functions.size () - 1;
734 for (unsigned int i = 0; i < length; ++i)
735 level = std::min (level, btinfo->functions[i]->level);
736
737 /* The last function segment contains the current instruction, which is not
738 really part of the trace. If it contains just this one instruction, we
739 ignore the segment. */
740 struct btrace_function *last = btinfo->functions.back ();
741 if (VEC_length (btrace_insn_s, last->insn) != 1)
742 level = std::min (level, last->level);
743
744 DEBUG_FTRACE ("setting global level offset: %d", -level);
745 btinfo->level = -level;
746 }
747
748 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
749 ftrace_connect_backtrace. BTINFO is the branch trace information for the
750 current thread. */
751
752 static void
753 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
754 struct btrace_function *prev,
755 struct btrace_function *next)
756 {
757 DEBUG_FTRACE ("connecting...");
758 ftrace_debug (prev, "..prev");
759 ftrace_debug (next, "..next");
760
761 /* The function segments are not yet connected. */
762 gdb_assert (prev->next == 0);
763 gdb_assert (next->prev == 0);
764
765 prev->next = next->number;
766 next->prev = prev->number;
767
768 /* We may have moved NEXT to a different function level. */
769 ftrace_fixup_level (btinfo, next, prev->level - next->level);
770
771 /* If we run out of back trace for one, let's use the other's. */
772 if (prev->up == 0)
773 {
774 const btrace_function_flags flags = next->flags;
775
776 next = ftrace_find_call_by_number (btinfo, next->up);
777 if (next != NULL)
778 {
779 DEBUG_FTRACE ("using next's callers");
780 ftrace_fixup_caller (btinfo, prev, next, flags);
781 }
782 }
783 else if (next->up == 0)
784 {
785 const btrace_function_flags flags = prev->flags;
786
787 prev = ftrace_find_call_by_number (btinfo, prev->up);
788 if (prev != NULL)
789 {
790 DEBUG_FTRACE ("using prev's callers");
791 ftrace_fixup_caller (btinfo, next, prev, flags);
792 }
793 }
794 else
795 {
796 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
797 link to add the tail callers to NEXT's back trace.
798
799 This removes NEXT->UP from NEXT's back trace. It will be added back
800 when connecting NEXT and PREV's callers - provided they exist.
801
802 If PREV's back trace consists of a series of tail calls without an
803 actual call, there will be no further connection and NEXT's caller will
804 be removed for good. To catch this case, we handle it here and connect
805 the top of PREV's back trace to NEXT's caller. */
806 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
807 {
808 struct btrace_function *caller;
809 btrace_function_flags next_flags, prev_flags;
810
811 /* We checked NEXT->UP above so CALLER can't be NULL. */
812 caller = ftrace_find_call_by_number (btinfo, next->up);
813 next_flags = next->flags;
814 prev_flags = prev->flags;
815
816 DEBUG_FTRACE ("adding prev's tail calls to next");
817
818 prev = ftrace_find_call_by_number (btinfo, prev->up);
819 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
820
821 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
822 prev->up))
823 {
824 /* At the end of PREV's back trace, continue with CALLER. */
825 if (prev->up == 0)
826 {
827 DEBUG_FTRACE ("fixing up link for tailcall chain");
828 ftrace_debug (prev, "..top");
829 ftrace_debug (caller, "..up");
830
831 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
832
833 /* If we skipped any tail calls, this may move CALLER to a
834 different function level.
835
836 Note that changing CALLER's level is only OK because we
837 know that this is the last iteration of the bottom-to-top
838 walk in ftrace_connect_backtrace.
839
840 Otherwise we will fix up CALLER's level when we connect it
841 to PREV's caller in the next iteration. */
842 ftrace_fixup_level (btinfo, caller,
843 prev->level - caller->level - 1);
844 break;
845 }
846
847 /* There's nothing to do if we find a real call. */
848 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
849 {
850 DEBUG_FTRACE ("will fix up link in next iteration");
851 break;
852 }
853 }
854 }
855 }
856 }
857
858 /* Connect function segments on the same level in the back trace at LHS and RHS.
859 The back traces at LHS and RHS are expected to match according to
860 ftrace_match_backtrace. BTINFO is the branch trace information for the
861 current thread. */
862
863 static void
864 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
865 struct btrace_function *lhs,
866 struct btrace_function *rhs)
867 {
868 while (lhs != NULL && rhs != NULL)
869 {
870 struct btrace_function *prev, *next;
871
872 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
873
874 /* Connecting LHS and RHS may change the up link. */
875 prev = lhs;
876 next = rhs;
877
878 lhs = ftrace_get_caller (btinfo, lhs);
879 rhs = ftrace_get_caller (btinfo, rhs);
880
881 ftrace_connect_bfun (btinfo, prev, next);
882 }
883 }
884
885 /* Bridge the gap between two function segments left and right of a gap if their
886 respective back traces match in at least MIN_MATCHES functions. BTINFO is
887 the branch trace information for the current thread.
888
889 Returns non-zero if the gap could be bridged, zero otherwise. */
890
891 static int
892 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
893 struct btrace_function *lhs, struct btrace_function *rhs,
894 int min_matches)
895 {
896 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
897 int best_matches;
898
899 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
900 rhs->insn_offset - 1, min_matches);
901
902 best_matches = 0;
903 best_l = NULL;
904 best_r = NULL;
905
906 /* We search the back traces of LHS and RHS for valid connections and connect
907 the two function segments that give the longest combined back trace. */
908
909 for (cand_l = lhs; cand_l != NULL;
910 cand_l = ftrace_get_caller (btinfo, cand_l))
911 for (cand_r = rhs; cand_r != NULL;
912 cand_r = ftrace_get_caller (btinfo, cand_r))
913 {
914 int matches;
915
916 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
917 if (best_matches < matches)
918 {
919 best_matches = matches;
920 best_l = cand_l;
921 best_r = cand_r;
922 }
923 }
924
925 /* We need at least MIN_MATCHES matches. */
926 gdb_assert (min_matches > 0);
927 if (best_matches < min_matches)
928 return 0;
929
930 DEBUG_FTRACE ("..matches: %d", best_matches);
931
932 /* We will fix up the level of BEST_R and succeeding function segments such
933 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
934
935 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
936 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
937
938 To catch this, we already fix up the level here where we can start at RHS
939 instead of at BEST_R. We will ignore the level fixup when connecting
940 BEST_L to BEST_R as they will already be on the same level. */
941 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
942
943 ftrace_connect_backtrace (btinfo, best_l, best_r);
944
945 return best_matches;
946 }
947
948 /* Try to bridge gaps due to overflow or decode errors by connecting the
949 function segments that are separated by the gap. */
950
951 static void
952 btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
953 {
954 struct btrace_thread_info *btinfo = &tp->btrace;
955 std::vector<unsigned int> remaining;
956 int min_matches;
957
958 DEBUG ("bridge gaps");
959
960 /* We require a minimum amount of matches for bridging a gap. The number of
961 required matches will be lowered with each iteration.
962
963 The more matches the higher our confidence that the bridging is correct.
964 For big gaps or small traces, however, it may not be feasible to require a
965 high number of matches. */
966 for (min_matches = 5; min_matches > 0; --min_matches)
967 {
968 /* Let's try to bridge as many gaps as we can. In some cases, we need to
969 skip a gap and revisit it after we have closed later gaps. */
970 while (!gaps.empty ())
971 {
972 for (const unsigned int number : gaps)
973 {
974 struct btrace_function *gap, *lhs, *rhs;
975 int bridged;
976
977 gap = ftrace_find_call_by_number (btinfo, number);
978
979 /* We may have a sequence of gaps if we run from one error into
980 the next as we try to re-sync onto the trace stream. Ignore
981 all but the leftmost gap in such a sequence.
982
983 Also ignore gaps at the beginning of the trace. */
984 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
985 if (lhs == NULL || lhs->errcode != 0)
986 continue;
987
988 /* Skip gaps to the right. */
989 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
990 while (rhs != NULL && rhs->errcode != 0)
991 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
992
993 /* Ignore gaps at the end of the trace. */
994 if (rhs == NULL)
995 continue;
996
997 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
998
999 /* Keep track of gaps we were not able to bridge and try again.
1000 If we just pushed them to the end of GAPS we would risk an
1001 infinite loop in case we simply cannot bridge a gap. */
1002 if (bridged == 0)
1003 remaining.push_back (number);
1004 }
1005
1006 /* Let's see if we made any progress. */
1007 if (remaining.size () == gaps.size ())
1008 break;
1009
1010 gaps.clear ();
1011 gaps.swap (remaining);
1012 }
1013
1014 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1015 if (gaps.empty ())
1016 break;
1017
1018 remaining.clear ();
1019 }
1020
1021 /* We may omit this in some cases. Not sure it is worth the extra
1022 complication, though. */
1023 ftrace_compute_global_level_offset (btinfo);
1024 }
1025
1026 /* Compute the function branch trace from BTS trace. */
1027
1028 static void
1029 btrace_compute_ftrace_bts (struct thread_info *tp,
1030 const struct btrace_data_bts *btrace,
1031 std::vector<unsigned int> &gaps)
1032 {
1033 struct btrace_thread_info *btinfo;
1034 struct gdbarch *gdbarch;
1035 unsigned int blk;
1036 int level;
1037
1038 gdbarch = target_gdbarch ();
1039 btinfo = &tp->btrace;
1040 blk = VEC_length (btrace_block_s, btrace->blocks);
1041
1042 if (btinfo->functions.empty ())
1043 level = INT_MAX;
1044 else
1045 level = -btinfo->level;
1046
1047 while (blk != 0)
1048 {
1049 btrace_block_s *block;
1050 CORE_ADDR pc;
1051
1052 blk -= 1;
1053
1054 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1055 pc = block->begin;
1056
1057 for (;;)
1058 {
1059 struct btrace_function *bfun;
1060 struct btrace_insn insn;
1061 int size;
1062
1063 /* We should hit the end of the block. Warn if we went too far. */
1064 if (block->end < pc)
1065 {
1066 /* Indicate the gap in the trace. */
1067 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1068
1069 warning (_("Recorded trace may be corrupted at instruction "
1070 "%u (pc = %s)."), bfun->insn_offset - 1,
1071 core_addr_to_string_nz (pc));
1072
1073 break;
1074 }
1075
1076 bfun = ftrace_update_function (btinfo, pc);
1077
1078 /* Maintain the function level offset.
1079 For all but the last block, we do it here. */
1080 if (blk != 0)
1081 level = std::min (level, bfun->level);
1082
1083 size = 0;
1084 TRY
1085 {
1086 size = gdb_insn_length (gdbarch, pc);
1087 }
1088 CATCH (error, RETURN_MASK_ERROR)
1089 {
1090 }
1091 END_CATCH
1092
1093 insn.pc = pc;
1094 insn.size = size;
1095 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1096 insn.flags = 0;
1097
1098 ftrace_update_insns (bfun, &insn);
1099
1100 /* We're done once we pushed the instruction at the end. */
1101 if (block->end == pc)
1102 break;
1103
1104 /* We can't continue if we fail to compute the size. */
1105 if (size <= 0)
1106 {
1107 /* Indicate the gap in the trace. We just added INSN so we're
1108 not at the beginning. */
1109 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1110
1111 warning (_("Recorded trace may be incomplete at instruction %u "
1112 "(pc = %s)."), bfun->insn_offset - 1,
1113 core_addr_to_string_nz (pc));
1114
1115 break;
1116 }
1117
1118 pc += size;
1119
1120 /* Maintain the function level offset.
1121 For the last block, we do it here to not consider the last
1122 instruction.
1123 Since the last instruction corresponds to the current instruction
1124 and is not really part of the execution history, it shouldn't
1125 affect the level. */
1126 if (blk == 0)
1127 level = std::min (level, bfun->level);
1128 }
1129 }
1130
1131 /* LEVEL is the minimal function level of all btrace function segments.
1132 Define the global level offset to -LEVEL so all function levels are
1133 normalized to start at zero. */
1134 btinfo->level = -level;
1135 }
1136
1137 #if defined (HAVE_LIBIPT)
1138
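/* Translate a libipt instruction class to the corresponding btrace
instruction class. Instruction classes we do not handle explicitly are
classified as BTRACE_INSN_OTHER. */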
1139 static enum btrace_insn_class
1140 pt_reclassify_insn (enum pt_insn_class iclass)
1141 {
1142 switch (iclass)
1143 {
1144 case ptic_call:
1145 return BTRACE_INSN_CALL;
1146
1147 case ptic_return:
1148 return BTRACE_INSN_RETURN;
1149
1150 case ptic_jump:
1151 return BTRACE_INSN_JUMP;
1152
1153 default:
1154 return BTRACE_INSN_OTHER;
1155 }
1156 }
1157
1158 /* Return the btrace instruction flags for INSN. */
1159
1160 static btrace_insn_flags
1161 pt_btrace_insn_flags (const struct pt_insn &insn)
1162 {
1163 btrace_insn_flags flags = 0;
1164
1165 if (insn.speculative)
1166 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1167
1168 return flags;
1169 }
1170
1171 /* Return the btrace instruction for INSN. */
1172
1173 static btrace_insn
1174 pt_btrace_insn (const struct pt_insn &insn)
1175 {
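/* The aggregate initializer relies on the declaration order of the
btrace_insn members: pc, size, iclass, flags. */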
1176 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1177 pt_reclassify_insn (insn.iclass),
1178 pt_btrace_insn_flags (insn)};
1179 }
1180
1181
1182 /* Add function branch trace to BTINFO using DECODER. */
1183
1184 static void
1185 ftrace_add_pt (struct btrace_thread_info *btinfo,
1186 struct pt_insn_decoder *decoder,
1187 int *plevel,
1188 std::vector<unsigned int> &gaps)
1189 {
1190 struct btrace_function *bfun;
1191 uint64_t offset;
1192 int errcode;
1193
1194 for (;;)
1195 {
1196 struct pt_insn insn;
1197
1198 errcode = pt_insn_sync_forward (decoder);
1199 if (errcode < 0)
1200 {
1201 if (errcode != -pte_eos)
1202 warning (_("Failed to synchronize onto the Intel Processor "
1203 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1204 break;
1205 }
1206
1207 for (;;)
1208 {
1209 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1210 if (errcode < 0)
1211 break;
1212
1213 /* Look for gaps in the trace - unless we're at the beginning. */
1214 if (!btinfo->functions.empty ())
1215 {
1216 /* Tracing is disabled and re-enabled each time we enter the
1217 kernel. Most times, we continue from the same instruction we
1218 stopped before. This is indicated via the RESUMED instruction
1219 flag. The ENABLED instruction flag means that we continued
1220 from some other instruction. Indicate this as a trace gap. */
1221 if (insn.enabled)
1222 {
1223 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1224
1225 pt_insn_get_offset (decoder, &offset);
1226
1227 warning (_("Non-contiguous trace at instruction %u (offset "
1228 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1229 bfun->insn_offset - 1, offset, insn.ip);
1230 }
1231 }
1232
1233 /* Indicate trace overflows. */
1234 if (insn.resynced)
1235 {
1236 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1237
1238 pt_insn_get_offset (decoder, &offset);
1239
1240 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1241 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
1242 offset, insn.ip);
1243 }
1244
1245 bfun = ftrace_update_function (btinfo, insn.ip);
1246
1247 /* Maintain the function level offset. */
1248 *plevel = std::min (*plevel, bfun->level);
1249
1250 btrace_insn btinsn = pt_btrace_insn (insn);
1251 ftrace_update_insns (bfun, &btinsn);
1252 }
1253
1254 if (errcode == -pte_eos)
1255 break;
1256
1257 /* Indicate the gap in the trace. */
1258 bfun = ftrace_new_gap (btinfo, errcode, gaps);
1259
1260 pt_insn_get_offset (decoder, &offset);
1261
1262 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1263 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
1264 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1265 }
1266 }
1267
1268 /* A callback function to allow the trace decoder to read the inferior's
1269 memory. */
1270
1271 static int
1272 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1273 const struct pt_asid *asid, uint64_t pc,
1274 void *context)
1275 {
1276 int result, errcode;
1277
1278 result = (int) size;
1279 TRY
1280 {
1281 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1282 if (errcode != 0)
1283 result = -pte_nomap;
1284 }
1285 CATCH (error, RETURN_MASK_ERROR)
1286 {
1287 result = -pte_nomap;
1288 }
1289 END_CATCH
1290
1291 return result;
1292 }
1293
1294 /* Translate the vendor from one enum to another. */
1295
1296 static enum pt_cpu_vendor
1297 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1298 {
1299 switch (vendor)
1300 {
1301 default:
1302 return pcv_unknown;
1303
1304 case CV_INTEL:
1305 return pcv_intel;
1306 }
1307 }
1308
1309 /* Finalize the function branch trace after decode. */
1310
1311 static void
1312 btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
struct thread_info *tp, int level)
1313 {
1314 pt_insn_free_decoder (decoder);
1315
1316 /* LEVEL is the minimal function level of all btrace function segments.
1317 Define the global level offset to -LEVEL so all function levels are
1318 normalized to start at zero. */
1319 tp->btrace.level = -level;
1320
1321 /* Add a single last instruction entry for the current PC.
1322 This allows us to compute the backtrace at the current PC using both
1323 standard unwind and btrace unwind.
1324 This extra entry is ignored by all record commands. */
1325 btrace_add_pc (tp);
1326 }
1327
1328 /* Compute the function branch trace from Intel Processor Trace
1329 format. */
1330
1331 static void
1332 btrace_compute_ftrace_pt (struct thread_info *tp,
1333 const struct btrace_data_pt *btrace,
1334 std::vector<unsigned int> &gaps)
1335 {
1336 struct btrace_thread_info *btinfo;
1337 struct pt_insn_decoder *decoder;
1338 struct pt_config config;
1339 int level, errcode;
1340
1341 if (btrace->size == 0)
1342 return;
1343
1344 btinfo = &tp->btrace;
1345 if (btinfo->functions.empty ())
1346 level = INT_MAX;
1347 else
1348 level = -btinfo->level;
1349
1350 pt_config_init (&config);
1351 config.begin = btrace->data;
1352 config.end = btrace->data + btrace->size;
1353
1354 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1355 config.cpu.family = btrace->config.cpu.family;
1356 config.cpu.model = btrace->config.cpu.model;
1357 config.cpu.stepping = btrace->config.cpu.stepping;
1358
1359 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1360 if (errcode < 0)
1361 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1362 pt_errstr (pt_errcode (errcode)));
1363
1364 decoder = pt_insn_alloc_decoder (&config);
1365 if (decoder == NULL)
1366 error (_("Failed to allocate the Intel Processor Trace decoder."));
1367
1368 TRY
1369 {
1370 struct pt_image *image;
1371
1372 image = pt_insn_get_image (decoder);
1373 if (image == NULL)
1374 error (_("Failed to configure the Intel Processor Trace decoder."));
1375
1376 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1377 if (errcode < 0)
1378 error (_("Failed to configure the Intel Processor Trace decoder: "
1379 "%s."), pt_errstr (pt_errcode (errcode)));
1380
1381 ftrace_add_pt (btinfo, decoder, &level, gaps);
1382 }
1383 CATCH (error, RETURN_MASK_ALL)
1384 {
1385 /* Indicate a gap in the trace if we quit trace processing. */
1386 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1387 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1388
1389 btrace_finalize_ftrace_pt (decoder, tp, level);
1390
1391 throw_exception (error);
1392 }
1393 END_CATCH
1394
1395 btrace_finalize_ftrace_pt (decoder, tp, level);
1396 }
1397
1398 #else /* defined (HAVE_LIBIPT) */
1399
1400 static void
1401 btrace_compute_ftrace_pt (struct thread_info *tp,
1402 const struct btrace_data_pt *btrace,
1403 std::vector<unsigned int> &gaps)
1404 {
1405 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1406 }
1407
1408 #endif /* defined (HAVE_LIBIPT) */
1409
1410 /* Compute the function branch trace from the branch trace data BTRACE
1411 for thread TP. */
1412
1413 static void
1414 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1415 std::vector<unsigned int> &gaps)
1416 {
1417 DEBUG ("compute ftrace");
1418
1419 switch (btrace->format)
1420 {
1421 case BTRACE_FORMAT_NONE:
1422 return;
1423
1424 case BTRACE_FORMAT_BTS:
1425 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1426 return;
1427
1428 case BTRACE_FORMAT_PT:
1429 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1430 return;
1431 }
1432
1433 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1434 }
1435
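/* Update the gap count in TP's branch trace information and try to
bridge the recorded GAPS. */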
1436 static void
1437 btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1438 {
1439 if (!gaps.empty ())
1440 {
1441 tp->btrace.ngaps += gaps.size ();
1442 btrace_bridge_gaps (tp, gaps);
1443 }
1444 }
1445
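/* Compute the function branch trace from BTRACE for thread TP. Gaps are
counted and bridged even if trace processing is interrupted by an
error. */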
1446 static void
1447 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1448 {
1449 std::vector<unsigned int> gaps;
1450
1451 TRY
1452 {
1453 btrace_compute_ftrace_1 (tp, btrace, gaps);
1454 }
1455 CATCH (error, RETURN_MASK_ALL)
1456 {
1457 btrace_finalize_ftrace (tp, gaps);
1458
1459 throw_exception (error);
1460 }
1461 END_CATCH
1462
1463 btrace_finalize_ftrace (tp, gaps);
1464 }
1465
1466 /* Add an entry for the current PC. */
1467
1468 static void
1469 btrace_add_pc (struct thread_info *tp)
1470 {
1471 struct btrace_data btrace;
1472 struct btrace_block *block;
1473 struct regcache *regcache;
1474 struct cleanup *cleanup;
1475 CORE_ADDR pc;
1476
1477 regcache = get_thread_regcache (tp->ptid);
1478 pc = regcache_read_pc (regcache);
1479
1480 btrace_data_init (&btrace);
1481 btrace.format = BTRACE_FORMAT_BTS;
1482 btrace.variant.bts.blocks = NULL;
1483
1484 cleanup = make_cleanup_btrace_data (&btrace);
1485
1486 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1487 block->begin = pc;
1488 block->end = pc;
1489
1490 btrace_compute_ftrace (tp, &btrace);
1491
1492 do_cleanups (cleanup);
1493 }
1494
1495 /* See btrace.h. */
1496
1497 void
1498 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1499 {
1500 if (tp->btrace.target != NULL)
1501 return;
1502
1503 #if !defined (HAVE_LIBIPT)
1504 if (conf->format == BTRACE_FORMAT_PT)
1505 error (_("GDB does not support Intel Processor Trace."));
1506 #endif /* !defined (HAVE_LIBIPT) */
1507
1508 if (!target_supports_btrace (conf->format))
1509 error (_("Target does not support branch tracing."));
1510
1511 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1512 target_pid_to_str (tp->ptid));
1513
1514 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1515
1516 /* We're done if we failed to enable tracing. */
1517 if (tp->btrace.target == NULL)
1518 return;
1519
1520 /* We need to undo the enable in case of errors. */
1521 TRY
1522 {
1523 /* Add an entry for the current PC so we start tracing from where we
1524 enabled it.
1525
1526 If we can't access TP's registers, TP is most likely running. In this
1527 case, we can't really say where tracing was enabled so it should be
1528 safe to simply skip this step.
1529
1530 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1531 start at the PC at which tracing was enabled. */
1532 if (conf->format != BTRACE_FORMAT_PT
1533 && can_access_registers_ptid (tp->ptid))
1534 btrace_add_pc (tp);
1535 }
1536 CATCH (exception, RETURN_MASK_ALL)
1537 {
1538 btrace_disable (tp);
1539
1540 throw_exception (exception);
1541 }
1542 END_CATCH
1543 }
1544
1545 /* See btrace.h. */
1546
1547 const struct btrace_config *
1548 btrace_conf (const struct btrace_thread_info *btinfo)
1549 {
1550 if (btinfo->target == NULL)
1551 return NULL;
1552
1553 return target_btrace_conf (btinfo->target);
1554 }
1555
1556 /* See btrace.h. */
1557
1558 void
1559 btrace_disable (struct thread_info *tp)
1560 {
1561 struct btrace_thread_info *btp = &tp->btrace;
1563
1564 if (btp->target == NULL)
1565 return;
1566
1567 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1568 target_pid_to_str (tp->ptid));
1569
1570 target_disable_btrace (btp->target);
1571 btp->target = NULL;
1572
1573 btrace_clear (tp);
1574 }
1575
1576 /* See btrace.h. */
1577
1578 void
1579 btrace_teardown (struct thread_info *tp)
1580 {
1581 struct btrace_thread_info *btp = &tp->btrace;
1583
1584 if (btp->target == NULL)
1585 return;
1586
1587 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1588 target_pid_to_str (tp->ptid));
1589
1590 target_teardown_btrace (btp->target);
1591 btp->target = NULL;
1592
1593 btrace_clear (tp);
1594 }
1595
1596 /* Stitch branch trace in BTS format. Return 0 on success, -1 if the
delta trace could not be used and a full read is needed. */
1597
1598 static int
1599 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1600 {
1601 struct btrace_thread_info *btinfo;
1602 struct btrace_function *last_bfun;
1603 struct btrace_insn *last_insn;
1604 btrace_block_s *first_new_block;
1605
1606 btinfo = &tp->btrace;
1607 gdb_assert (!btinfo->functions.empty ());
1608 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1609
1610 last_bfun = btinfo->functions.back ();
1611
1612 /* If the existing trace ends with a gap, we just glue the traces
1613 together. We need to drop the last (i.e. chronologically first) block
1614 of the new trace, though, since we can't fill in the start address. */
1615 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1616 {
1617 VEC_pop (btrace_block_s, btrace->blocks);
1618 return 0;
1619 }
1620
1621 /* Beware that block trace starts with the most recent block, so the
1622 chronologically first block in the new trace is the last block in
1623 the new trace's block vector. */
1624 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1625 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1626
1627 /* If the current PC at the end of the block is the same as in our current
1628 trace, there are two explanations:
1629 1. we executed the instruction and some branch brought us back.
1630 2. we have not made any progress.
1631 In the first case, the delta trace vector should contain at least two
1632 entries.
1633 In the second case, the delta trace vector should contain exactly one
1634 entry for the partial block containing the current PC. Remove it. */
1635 if (first_new_block->end == last_insn->pc
1636 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1637 {
1638 VEC_pop (btrace_block_s, btrace->blocks);
1639 return 0;
1640 }
1641
1642 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1643 core_addr_to_string_nz (first_new_block->end));
1644
1645 /* Do a simple sanity check to make sure we don't accidentally end up
1646 with a bad block. This should not occur in practice. */
1647 if (first_new_block->end < last_insn->pc)
1648 {
1649 warning (_("Error while trying to read delta trace. Falling back to "
1650 "a full read."));
1651 return -1;
1652 }
1653
1654 /* We adjust the last block to start at the end of our current trace. */
1655 gdb_assert (first_new_block->begin == 0);
1656 first_new_block->begin = last_insn->pc;
1657
1658 /* We simply pop the last insn so we can insert it again as part of
1659 the normal branch trace computation.
1660 Since instruction iterators are based on indices in the instructions
1661 vector, we don't leave any pointers dangling. */
1662 DEBUG ("pruning insn at %s for stitching",
1663 ftrace_print_insn_addr (last_insn));
1664
1665 VEC_pop (btrace_insn_s, last_bfun->insn);
1666
1667 /* The instructions vector may become empty temporarily if this has
1668 been the only instruction in this function segment.
1669 This violates the invariant but will be remedied shortly by
1670 btrace_compute_ftrace when we add the new trace. */
1671
1672 /* The only case where this would hurt is if the entire trace consisted
1673 of just that one instruction. If we remove it, we might turn the now
1674 empty btrace function segment into a gap. But we don't want gaps at
1675 the beginning. To avoid this, we remove the entire old trace. */
1676 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
1677 btrace_clear (tp);
1678
1679 return 0;
1680 }
1681
1682 /* Adjust the block trace in order to stitch old and new trace together.
1683 BTRACE is the new delta trace between the last and the current stop.
1684 TP is the traced thread.
1685 May modify BTRACE as well as the existing trace in TP.
1686 Return 0 on success, -1 otherwise. */
1687
1688 static int
1689 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1690 {
1691 /* If we don't have trace, there's nothing to do. */
1692 if (btrace_data_empty (btrace))
1693 return 0;
1694
1695 switch (btrace->format)
1696 {
1697 case BTRACE_FORMAT_NONE:
1698 return 0;
1699
1700 case BTRACE_FORMAT_BTS:
1701 return btrace_stitch_bts (&btrace->variant.bts, tp);
1702
1703 case BTRACE_FORMAT_PT:
1704 /* Delta reads are not supported. */
1705 return -1;
1706 }
1707
1708 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1709 }
1710
1711 /* Clear the branch trace histories in BTINFO. */
1712
1713 static void
1714 btrace_clear_history (struct btrace_thread_info *btinfo)
1715 {
1716 xfree (btinfo->insn_history);
1717 xfree (btinfo->call_history);
1718 xfree (btinfo->replay);
1719
1720 btinfo->insn_history = NULL;
1721 btinfo->call_history = NULL;
1722 btinfo->replay = NULL;
1723 }
1724
1725 /* Clear the branch trace maintenance histories in BTINFO. */
1726
1727 static void
1728 btrace_maint_clear (struct btrace_thread_info *btinfo)
1729 {
1730 switch (btinfo->data.format)
1731 {
1732 default:
1733 break;
1734
1735 case BTRACE_FORMAT_BTS:
1736 btinfo->maint.variant.bts.packet_history.begin = 0;
1737 btinfo->maint.variant.bts.packet_history.end = 0;
1738 break;
1739
1740 #if defined (HAVE_LIBIPT)
1741 case BTRACE_FORMAT_PT:
1742 xfree (btinfo->maint.variant.pt.packets);
1743
1744 btinfo->maint.variant.pt.packets = NULL;
1745 btinfo->maint.variant.pt.packet_history.begin = 0;
1746 btinfo->maint.variant.pt.packet_history.end = 0;
1747 break;
1748 #endif /* defined (HAVE_LIBIPT) */
1749 }
1750 }
1751
1752 /* See btrace.h. */
1753
1754 const char *
1755 btrace_decode_error (enum btrace_format format, int errcode)
1756 {
1757 switch (format)
1758 {
1759 case BTRACE_FORMAT_BTS:
1760 switch (errcode)
1761 {
1762 case BDE_BTS_OVERFLOW:
1763 return _("instruction overflow");
1764
1765 case BDE_BTS_INSN_SIZE:
1766 return _("unknown instruction");
1767
1768 default:
1769 break;
1770 }
1771 break;
1772
1773 #if defined (HAVE_LIBIPT)
1774 case BTRACE_FORMAT_PT:
1775 switch (errcode)
1776 {
1777 case BDE_PT_USER_QUIT:
1778 return _("trace decode cancelled");
1779
1780 case BDE_PT_DISABLED:
1781 return _("disabled");
1782
1783 case BDE_PT_OVERFLOW:
1784 return _("overflow");
1785
1786 default:
1787 if (errcode < 0)
1788 return pt_errstr (pt_errcode (errcode));
1789 break;
1790 }
1791 break;
1792 #endif /* defined (HAVE_LIBIPT) */
1793
1794 default:
1795 break;
1796 }
1797
1798 return _("unknown");
1799 }
1800
1801 /* See btrace.h. */
1802
1803 void
1804 btrace_fetch (struct thread_info *tp)
1805 {
1806 struct btrace_thread_info *btinfo;
1807 struct btrace_target_info *tinfo;
1808 struct btrace_data btrace;
1809 struct cleanup *cleanup;
1810 int errcode;
1811
1812 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1813 target_pid_to_str (tp->ptid));
1814
1815 btinfo = &tp->btrace;
1816 tinfo = btinfo->target;
1817 if (tinfo == NULL)
1818 return;
1819
1820 /* There's no way we could get new trace while replaying.
1821 On the other hand, delta trace would return a partial record with the
1822 current PC, which is the replay PC, not the last PC, as expected. */
1823 if (btinfo->replay != NULL)
1824 return;
1825
1826 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1827 can store a gdb.Record object in Python referring to a different thread
1828 than the current one, temporarily set INFERIOR_PTID. */
1829 cleanup = save_inferior_ptid ();
1830 inferior_ptid = tp->ptid;
1831
1832 /* We should not be called on running or exited threads. */
1833 gdb_assert (can_access_registers_ptid (tp->ptid));
1834
1835 btrace_data_init (&btrace);
1836 make_cleanup_btrace_data (&btrace);
1837
1838 /* Let's first try to extend the trace we already have. */
1839 if (!btinfo->functions.empty ())
1840 {
1841 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1842 if (errcode == 0)
1843 {
1844 /* Success. Let's try to stitch the traces together. */
1845 errcode = btrace_stitch_trace (&btrace, tp);
1846 }
1847 else
1848 {
1849 /* We failed to read delta trace. Let's try to read new trace. */
1850 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1851
1852 /* If we got any new trace, discard what we have. */
1853 if (errcode == 0 && !btrace_data_empty (&btrace))
1854 btrace_clear (tp);
1855 }
1856
1857 /* If we were not able to read the trace, we start over. */
1858 if (errcode != 0)
1859 {
1860 btrace_clear (tp);
1861 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1862 }
1863 }
1864 else
1865 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1866
1867 /* If we were not able to read the branch trace, signal an error. */
1868 if (errcode != 0)
1869 error (_("Failed to read branch trace."));
1870
1871 /* Compute the trace, provided we have any. */
1872 if (!btrace_data_empty (&btrace))
1873 {
1874 /* Store the raw trace data. The stored data will be cleared in
1875 btrace_clear, so we always append the new trace. */
1876 btrace_data_append (&btinfo->data, &btrace);
1877 btrace_maint_clear (btinfo);
1878
1879 btrace_clear_history (btinfo);
1880 btrace_compute_ftrace (tp, &btrace);
1881 }
1882
1883 do_cleanups (cleanup);
1884 }
1885
1886 /* See btrace.h. */
1887
1888 void
1889 btrace_clear (struct thread_info *tp)
1890 {
1891 struct btrace_thread_info *btinfo;
1892
1893 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1894 target_pid_to_str (tp->ptid));
1895
1896 /* Make sure btrace frames that may hold a pointer into the branch
1897 trace data are destroyed. */
1898 reinit_frame_cache ();
1899
1900 btinfo = &tp->btrace;
1901 for (auto &bfun : btinfo->functions)
1902 {
1903 VEC_free (btrace_insn_s, bfun->insn);
1904 xfree (bfun);
1905 }
1906
1907 btinfo->functions.clear ();
1908 btinfo->ngaps = 0;
1909
1910 /* The maint data must be cleared first - it depends on BTINFO->DATA. */
1911 btrace_maint_clear (btinfo);
1912 btrace_data_clear (&btinfo->data);
1913 btrace_clear_history (btinfo);
1914 }
1915
1916 /* See btrace.h. */
1917
1918 void
1919 btrace_free_objfile (struct objfile *objfile)
1920 {
1921 struct thread_info *tp;
1922
1923 DEBUG ("free objfile");
1924
1925 ALL_NON_EXITED_THREADS (tp)
1926 btrace_clear (tp);
1927 }
1928
1929 #if defined (HAVE_LIBEXPAT)
1930
1931 /* Check the btrace document version. */
1932
1933 static void
1934 check_xml_btrace_version (struct gdb_xml_parser *parser,
1935 const struct gdb_xml_element *element,
1936 void *user_data, VEC (gdb_xml_value_s) *attributes)
1937 {
1938 const char *version
1939 = (const char *) xml_find_attribute (attributes, "version")->value;
1940
1941 if (strcmp (version, "1.0") != 0)
1942 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1943 }
1944
1945 /* Parse a btrace "block" xml record. */
1946
1947 static void
1948 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1949 const struct gdb_xml_element *element,
1950 void *user_data, VEC (gdb_xml_value_s) *attributes)
1951 {
1952 struct btrace_data *btrace;
1953 struct btrace_block *block;
1954 ULONGEST *begin, *end;
1955
1956 btrace = (struct btrace_data *) user_data;
1957
1958 switch (btrace->format)
1959 {
1960 case BTRACE_FORMAT_BTS:
1961 break;
1962
1963 case BTRACE_FORMAT_NONE:
1964 btrace->format = BTRACE_FORMAT_BTS;
1965 btrace->variant.bts.blocks = NULL;
1966 break;
1967
1968 default:
1969 gdb_xml_error (parser, _("Btrace format error."));
1970 }
1971
1972 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1973 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1974
1975 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1976 block->begin = *begin;
1977 block->end = *end;
1978 }
1979
1980 /* Parse a "raw" xml record. */
1981
1982 static void
1983 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1984 gdb_byte **pdata, size_t *psize)
1985 {
1986 struct cleanup *cleanup;
1987 gdb_byte *data, *bin;
1988 size_t len, size;
1989
1990 len = strlen (body_text);
1991 if (len % 2 != 0)
1992 gdb_xml_error (parser, _("Bad raw data size."));
1993
1994 size = len / 2;
1995
1996 bin = data = (gdb_byte *) xmalloc (size);
1997 cleanup = make_cleanup (xfree, data);
1998
1999 /* We use hex encoding - see common/rsp-low.h. */
2000 while (len > 0)
2001 {
2002 char hi, lo;
2003
2004 hi = *body_text++;
2005 lo = *body_text++;
2006
2007 if (hi == 0 || lo == 0)
2008 gdb_xml_error (parser, _("Bad hex encoding."));
2009
2010 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2011 len -= 2;
2012 }
2013
2014 discard_cleanups (cleanup);
2015
2016 *pdata = data;
2017 *psize = size;
2018 }
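/* For example (illustrative): a body text of "0102ab" decodes into the
   three bytes { 0x01, 0x02, 0xab }.  An odd-length body is rejected with
   "Bad raw data size." before any decoding takes place.  */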
2019
2020 /* Parse a btrace pt-config "cpu" xml record. */
2021
2022 static void
2023 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2024 const struct gdb_xml_element *element,
2025 void *user_data,
2026 VEC (gdb_xml_value_s) *attributes)
2027 {
2028 struct btrace_data *btrace;
2029 const char *vendor;
2030 ULONGEST *family, *model, *stepping;
2031
2032 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2033 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2034 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2035 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2036
2037 btrace = (struct btrace_data *) user_data;
2038
2039 if (strcmp (vendor, "GenuineIntel") == 0)
2040 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2041
2042 btrace->variant.pt.config.cpu.family = *family;
2043 btrace->variant.pt.config.cpu.model = *model;
2044 btrace->variant.pt.config.cpu.stepping = *stepping;
2045 }
2046
2047 /* Parse a btrace pt "raw" xml record. */
2048
2049 static void
2050 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2051 const struct gdb_xml_element *element,
2052 void *user_data, const char *body_text)
2053 {
2054 struct btrace_data *btrace;
2055
2056 btrace = (struct btrace_data *) user_data;
2057 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2058 &btrace->variant.pt.size);
2059 }
2060
2061 /* Parse a btrace "pt" xml record. */
2062
2063 static void
2064 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2065 const struct gdb_xml_element *element,
2066 void *user_data, VEC (gdb_xml_value_s) *attributes)
2067 {
2068 struct btrace_data *btrace;
2069
2070 btrace = (struct btrace_data *) user_data;
2071 btrace->format = BTRACE_FORMAT_PT;
2072 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2073 btrace->variant.pt.data = NULL;
2074 btrace->variant.pt.size = 0;
2075 }
2076
2077 static const struct gdb_xml_attribute block_attributes[] = {
2078 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2079 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2080 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2081 };
2082
2083 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2084 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2085 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2086 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2087 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2088 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2089 };
2090
2091 static const struct gdb_xml_element btrace_pt_config_children[] = {
2092 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2093 parse_xml_btrace_pt_config_cpu, NULL },
2094 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2095 };
2096
2097 static const struct gdb_xml_element btrace_pt_children[] = {
2098 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2099 NULL },
2100 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2101 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2102 };
2103
2104 static const struct gdb_xml_attribute btrace_attributes[] = {
2105 { "version", GDB_XML_AF_NONE, NULL, NULL },
2106 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2107 };
2108
2109 static const struct gdb_xml_element btrace_children[] = {
2110 { "block", block_attributes, NULL,
2111 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2112 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2113 NULL },
2114 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2115 };
2116
2117 static const struct gdb_xml_element btrace_elements[] = {
2118 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2119 check_xml_btrace_version, NULL },
2120 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2121 };
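/* For reference, a document accepted by the tables above might look like
   this (a hand-written sketch, not output from an actual target):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400530" end="0x400548"/>
     </btrace>

   or, for Intel Processor Trace, with the raw trace hex-encoded as
   described in parse_xml_raw:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
         </pt-config>
         <raw>02824802...</raw>
       </pt>
     </btrace>  */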
2122
2123 #endif /* defined (HAVE_LIBEXPAT) */
2124
2125 /* See btrace.h. */
2126
2127 void
2128 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2129 {
2130 struct cleanup *cleanup;
2131 int errcode;
2132
2133 #if defined (HAVE_LIBEXPAT)
2134
2135 btrace->format = BTRACE_FORMAT_NONE;
2136
2137 cleanup = make_cleanup_btrace_data (btrace);
2138 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2139 buffer, btrace);
2140 if (errcode != 0)
2141 error (_("Error parsing branch trace."));
2142
2143 /* Keep parse results. */
2144 discard_cleanups (cleanup);
2145
2146 #else /* !defined (HAVE_LIBEXPAT) */
2147
2148 error (_("Cannot process branch trace. XML parsing is not supported."));
2149
2150 #endif /* !defined (HAVE_LIBEXPAT) */
2151 }
2152
2153 #if defined (HAVE_LIBEXPAT)
2154
2155 /* Parse a btrace-conf "bts" xml record. */
2156
2157 static void
2158 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2159 const struct gdb_xml_element *element,
2160 void *user_data, VEC (gdb_xml_value_s) *attributes)
2161 {
2162 struct btrace_config *conf;
2163 struct gdb_xml_value *size;
2164
2165 conf = (struct btrace_config *) user_data;
2166 conf->format = BTRACE_FORMAT_BTS;
2167 conf->bts.size = 0;
2168
2169 size = xml_find_attribute (attributes, "size");
2170 if (size != NULL)
2171 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2172 }
2173
2174 /* Parse a btrace-conf "pt" xml record. */
2175
2176 static void
2177 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2178 const struct gdb_xml_element *element,
2179 void *user_data, VEC (gdb_xml_value_s) *attributes)
2180 {
2181 struct btrace_config *conf;
2182 struct gdb_xml_value *size;
2183
2184 conf = (struct btrace_config *) user_data;
2185 conf->format = BTRACE_FORMAT_PT;
2186 conf->pt.size = 0;
2187
2188 size = xml_find_attribute (attributes, "size");
2189 if (size != NULL)
2190 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2191 }
2192
2193 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2194 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2195 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2196 };
2197
2198 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2199 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2200 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2201 };
2202
2203 static const struct gdb_xml_element btrace_conf_children[] = {
2204 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2205 parse_xml_btrace_conf_bts, NULL },
2206 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2207 parse_xml_btrace_conf_pt, NULL },
2208 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2209 };
2210
2211 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2212 { "version", GDB_XML_AF_NONE, NULL, NULL },
2213 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2214 };
2215
2216 static const struct gdb_xml_element btrace_conf_elements[] = {
2217 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2218 GDB_XML_EF_NONE, NULL, NULL },
2219 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2220 };
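/* For reference, a configuration document accepted by the tables above
   might look like this (a hand-written sketch):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   or, for Intel Processor Trace:

     <btrace-conf version="1.0">
       <pt size="16777216"/>
     </btrace-conf>  */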
2221
2222 #endif /* defined (HAVE_LIBEXPAT) */
2223
2224 /* See btrace.h. */
2225
2226 void
2227 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2228 {
2229 int errcode;
2230
2231 #if defined (HAVE_LIBEXPAT)
2232
2233 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2234 btrace_conf_elements, xml, conf);
2235 if (errcode != 0)
2236 error (_("Error parsing branch trace configuration."));
2237
2238 #else /* !defined (HAVE_LIBEXPAT) */
2239
2240 error (_("XML parsing is not supported."));
2241
2242 #endif /* !defined (HAVE_LIBEXPAT) */
2243 }
2244
2245 /* See btrace.h. */
2246
2247 const struct btrace_insn *
2248 btrace_insn_get (const struct btrace_insn_iterator *it)
2249 {
2250 const struct btrace_function *bfun;
2251 unsigned int index, end;
2252
2253 index = it->insn_index;
2254 bfun = it->btinfo->functions[it->call_index];
2255
2256 /* Check if the iterator points to a gap in the trace. */
2257 if (bfun->errcode != 0)
2258 return NULL;
2259
2260 /* The index is within the bounds of this function's instruction vector. */
2261 end = VEC_length (btrace_insn_s, bfun->insn);
2262 gdb_assert (0 < end);
2263 gdb_assert (index < end);
2264
2265 return VEC_index (btrace_insn_s, bfun->insn, index);
2266 }
2267
2268 /* See btrace.h. */
2269
2270 int
2271 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2272 {
2273 const struct btrace_function *bfun;
2274
2275 bfun = it->btinfo->functions[it->call_index];
2276 return bfun->errcode;
2277 }
2278
2279 /* See btrace.h. */
2280
2281 unsigned int
2282 btrace_insn_number (const struct btrace_insn_iterator *it)
2283 {
2284 const struct btrace_function *bfun;
2285
2286 bfun = it->btinfo->functions[it->call_index];
2287 return bfun->insn_offset + it->insn_index;
2288 }
2289
2290 /* See btrace.h. */
2291
2292 void
2293 btrace_insn_begin (struct btrace_insn_iterator *it,
2294 const struct btrace_thread_info *btinfo)
2295 {
2296 if (btinfo->functions.empty ())
2297 error (_("No trace."));
2298
2299 it->btinfo = btinfo;
2300 it->call_index = 0;
2301 it->insn_index = 0;
2302 }
2303
2304 /* See btrace.h. */
2305
2306 void
2307 btrace_insn_end (struct btrace_insn_iterator *it,
2308 const struct btrace_thread_info *btinfo)
2309 {
2310 const struct btrace_function *bfun;
2311 unsigned int length;
2312
2313 if (btinfo->functions.empty ())
2314 error (_("No trace."));
2315
2316 bfun = btinfo->functions.back ();
2317 length = VEC_length (btrace_insn_s, bfun->insn);
2318
2319 /* The last function segment may either be a gap or contain the current
2320 instruction, which is one past the end of the execution trace; ignore
2321 it. */
2322 if (length > 0)
2323 length -= 1;
2324
2325 it->btinfo = btinfo;
2326 it->call_index = bfun->number - 1;
2327 it->insn_index = length;
2328 }
2329
2330 /* See btrace.h. */
2331
2332 unsigned int
2333 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2334 {
2335 const struct btrace_function *bfun;
2336 unsigned int index, steps;
2337
2338 bfun = it->btinfo->functions[it->call_index];
2339 steps = 0;
2340 index = it->insn_index;
2341
2342 while (stride != 0)
2343 {
2344 unsigned int end, space, adv;
2345
2346 end = VEC_length (btrace_insn_s, bfun->insn);
2347
2348 /* An empty function segment represents a gap in the trace. We count
2349 it as one instruction. */
2350 if (end == 0)
2351 {
2352 const struct btrace_function *next;
2353
2354 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2355 if (next == NULL)
2356 break;
2357
2358 stride -= 1;
2359 steps += 1;
2360
2361 bfun = next;
2362 index = 0;
2363
2364 continue;
2365 }
2366
2367 gdb_assert (0 < end);
2368 gdb_assert (index < end);
2369
2370 /* Compute the number of instructions remaining in this segment. */
2371 space = end - index;
2372
2373 /* Advance the iterator as far as possible within this segment. */
2374 adv = std::min (space, stride);
2375 stride -= adv;
2376 index += adv;
2377 steps += adv;
2378
2379 /* Move to the next function if we're at the end of this one. */
2380 if (index == end)
2381 {
2382 const struct btrace_function *next;
2383
2384 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2385 if (next == NULL)
2386 {
2387 /* We stepped past the last function.
2388
2389 Let's adjust the index to point to the last instruction in
2390 the previous function. */
2391 index -= 1;
2392 steps -= 1;
2393 break;
2394 }
2395
2396 /* We now point to the first instruction in the new function. */
2397 bfun = next;
2398 index = 0;
2399 }
2400
2401 /* We did make progress. */
2402 gdb_assert (adv > 0);
2403 }
2404
2405 /* Update the iterator. */
2406 it->call_index = bfun->number - 1;
2407 it->insn_index = index;
2408
2409 return steps;
2410 }
2411
2412 /* See btrace.h. */
2413
2414 unsigned int
2415 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2416 {
2417 const struct btrace_function *bfun;
2418 unsigned int index, steps;
2419
2420 bfun = it->btinfo->functions[it->call_index];
2421 steps = 0;
2422 index = it->insn_index;
2423
2424 while (stride != 0)
2425 {
2426 unsigned int adv;
2427
2428 /* Move to the previous function if we're at the start of this one. */
2429 if (index == 0)
2430 {
2431 const struct btrace_function *prev;
2432
2433 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2434 if (prev == NULL)
2435 break;
2436
2437 /* We point to one after the last instruction in the new function. */
2438 bfun = prev;
2439 index = VEC_length (btrace_insn_s, bfun->insn);
2440
2441 /* An empty function segment represents a gap in the trace. We count
2442 it as one instruction. */
2443 if (index == 0)
2444 {
2445 stride -= 1;
2446 steps += 1;
2447
2448 continue;
2449 }
2450 }
2451
2452 /* Advance the iterator as far as possible within this segment. */
2453 adv = std::min (index, stride);
2454
2455 stride -= adv;
2456 index -= adv;
2457 steps += adv;
2458
2459 /* We did make progress. */
2460 gdb_assert (adv > 0);
2461 }
2462
2463 /* Update the iterator. */
2464 it->call_index = bfun->number - 1;
2465 it->insn_index = index;
2466
2467 return steps;
2468 }
2469
2470 /* See btrace.h. */
2471
2472 int
2473 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2474 const struct btrace_insn_iterator *rhs)
2475 {
2476 gdb_assert (lhs->btinfo == rhs->btinfo);
2477
2478 if (lhs->call_index != rhs->call_index)
2479 return lhs->call_index - rhs->call_index;
2480
2481 return lhs->insn_index - rhs->insn_index;
2482 }
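/* A usage sketch for the instruction iterator above; illustrative only,
   this is not called from anywhere in this file.  It assumes BTINFO
   points at a thread's btrace_thread_info whose trace has already been
   fetched, and walks the trace from the first to the last recorded
   instruction:

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           printf_unfiltered ("%s\n", core_addr_to_string_nz (insn->pc));

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }

   A NULL result from btrace_insn_get denotes a gap in the trace; see
   btrace_insn_get_error for the corresponding error code.  */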
2483
2484 /* See btrace.h. */
2485
2486 int
2487 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2488 const struct btrace_thread_info *btinfo,
2489 unsigned int number)
2490 {
2491 const struct btrace_function *bfun;
2492 unsigned int upper, lower;
2493
2494 if (btinfo->functions.empty ())
2495 return 0;
2496
2497 lower = 0;
2498 bfun = btinfo->functions[lower];
2499 if (number < bfun->insn_offset)
2500 return 0;
2501
2502 upper = btinfo->functions.size () - 1;
2503 bfun = btinfo->functions[upper];
2504 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2505 return 0;
2506
2507 /* We assume that there are no holes in the numbering. */
2508 for (;;)
2509 {
2510 const unsigned int average = lower + (upper - lower) / 2;
2511
2512 bfun = btinfo->functions[average];
2513
2514 if (number < bfun->insn_offset)
2515 {
2516 upper = average - 1;
2517 continue;
2518 }
2519
2520 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2521 {
2522 lower = average + 1;
2523 continue;
2524 }
2525
2526 break;
2527 }
2528
2529 it->btinfo = btinfo;
2530 it->call_index = bfun->number - 1;
2531 it->insn_index = number - bfun->insn_offset;
2532 return 1;
2533 }
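/* For example (a sketch): three function segments with insn_offset 1, 4
   and 9, containing 3, 5 and 2 instructions respectively, cover the
   instruction numbers 1-3, 4-8 and 9-10.  Looking up number 6 selects
   the second segment with an insn_index of 2.  */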
2534
2535 /* Returns true if the recording ends with a function segment that
2536 contains only a single (i.e. the current) instruction. */
2537
2538 static bool
2539 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2540 {
2541 const btrace_function *bfun;
2542
2543 if (btinfo->functions.empty ())
2544 return false;
2545
2546 bfun = btinfo->functions.back ();
2547 if (bfun->errcode != 0)
2548 return false;
2549
2550 return ftrace_call_num_insn (bfun) == 1;
2551 }
2552
2553 /* See btrace.h. */
2554
2555 const struct btrace_function *
2556 btrace_call_get (const struct btrace_call_iterator *it)
2557 {
2558 if (it->index >= it->btinfo->functions.size ())
2559 return NULL;
2560
2561 return it->btinfo->functions[it->index];
2562 }
2563
2564 /* See btrace.h. */
2565
2566 unsigned int
2567 btrace_call_number (const struct btrace_call_iterator *it)
2568 {
2569 const unsigned int length = it->btinfo->functions.size ();
2570
2571 /* If the last function segment contains only a single instruction (i.e. the
2572 current instruction), skip it. */
2573 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2574 return length;
2575
2576 return it->index + 1;
2577 }
2578
2579 /* See btrace.h. */
2580
2581 void
2582 btrace_call_begin (struct btrace_call_iterator *it,
2583 const struct btrace_thread_info *btinfo)
2584 {
2585 if (btinfo->functions.empty ())
2586 error (_("No trace."));
2587
2588 it->btinfo = btinfo;
2589 it->index = 0;
2590 }
2591
2592 /* See btrace.h. */
2593
2594 void
2595 btrace_call_end (struct btrace_call_iterator *it,
2596 const struct btrace_thread_info *btinfo)
2597 {
2598 if (btinfo->functions.empty ())
2599 error (_("No trace."));
2600
2601 it->btinfo = btinfo;
2602 it->index = btinfo->functions.size ();
2603 }
2604
2605 /* See btrace.h. */
2606
2607 unsigned int
2608 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2609 {
2610 const unsigned int length = it->btinfo->functions.size ();
2611
2612 if (it->index + stride < length - 1)
2613 /* Default case: Simply advance the iterator. */
2614 it->index += stride;
2615 else if (it->index + stride == length - 1)
2616 {
2617 /* We land exactly at the last function segment. If it contains only one
2618 instruction (i.e. the current instruction) it is not actually part of
2619 the trace. */
2620 if (btrace_ends_with_single_insn (it->btinfo))
2621 it->index = length;
2622 else
2623 it->index = length - 1;
2624 }
2625 else
2626 {
2627 /* We land past the last function segment and have to adjust the stride.
2628 If the last function segment contains only one instruction (i.e. the
2629 current instruction) it is not actually part of the trace. */
2630 if (btrace_ends_with_single_insn (it->btinfo))
2631 stride = length - it->index - 1;
2632 else
2633 stride = length - it->index;
2634
2635 it->index = length;
2636 }
2637
2638 return stride;
2639 }
2640
2641 /* See btrace.h. */
2642
2643 unsigned int
2644 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2645 {
2646 const unsigned int length = it->btinfo->functions.size ();
2647 int steps = 0;
2648
2649 gdb_assert (it->index <= length);
2650
2651 if (stride == 0 || it->index == 0)
2652 return 0;
2653
2654 /* If we are at the end, the first step is a special case. If the last
2655 function segment contains only one instruction (i.e. the current
2656 instruction) it is not actually part of the trace. To be able to step
2657 over this instruction, we need at least one more function segment. */
2658 if ((it->index == length) && (length > 1))
2659 {
2660 if (btrace_ends_with_single_insn (it->btinfo))
2661 it->index = length - 2;
2662 else
2663 it->index = length - 1;
2664
2665 steps = 1;
2666 stride -= 1;
2667 }
2668
2669 stride = std::min (stride, it->index);
2670
2671 it->index -= stride;
2672 return steps + stride;
2673 }
2674
2675 /* See btrace.h. */
2676
2677 int
2678 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2679 const struct btrace_call_iterator *rhs)
2680 {
2681 gdb_assert (lhs->btinfo == rhs->btinfo);
2682 return (int) (lhs->index - rhs->index);
2683 }
2684
2685 /* See btrace.h. */
2686
2687 int
2688 btrace_find_call_by_number (struct btrace_call_iterator *it,
2689 const struct btrace_thread_info *btinfo,
2690 unsigned int number)
2691 {
2692 const unsigned int length = btinfo->functions.size ();
2693
2694 if ((number == 0) || (number > length))
2695 return 0;
2696
2697 it->btinfo = btinfo;
2698 it->index = number - 1;
2699 return 1;
2700 }
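/* A usage sketch for the call iterator above; illustrative only, this is
   not called from anywhere in this file.  It assumes BTINFO points at a
   thread's btrace_thread_info whose trace has already been fetched, and
   visits every recorded function segment in order:

     struct btrace_call_iterator it;
     const struct btrace_function *bfun;

     btrace_call_begin (&it, btinfo);
     while ((bfun = btrace_call_get (&it)) != NULL)
       {
         printf_unfiltered ("%u\t%s\n", btrace_call_number (&it),
                            ftrace_print_function_name (bfun));

         if (btrace_call_next (&it, 1) == 0)
           break;
       }

   btrace_call_get returns NULL once the iterator moves past the last
   segment.  */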
2701
2702 /* See btrace.h. */
2703
2704 void
2705 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2706 const struct btrace_insn_iterator *begin,
2707 const struct btrace_insn_iterator *end)
2708 {
2709 if (btinfo->insn_history == NULL)
2710 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2711
2712 btinfo->insn_history->begin = *begin;
2713 btinfo->insn_history->end = *end;
2714 }
2715
2716 /* See btrace.h. */
2717
2718 void
2719 btrace_set_call_history (struct btrace_thread_info *btinfo,
2720 const struct btrace_call_iterator *begin,
2721 const struct btrace_call_iterator *end)
2722 {
2723 gdb_assert (begin->btinfo == end->btinfo);
2724
2725 if (btinfo->call_history == NULL)
2726 btinfo->call_history = XCNEW (struct btrace_call_history);
2727
2728 btinfo->call_history->begin = *begin;
2729 btinfo->call_history->end = *end;
2730 }
2731
2732 /* See btrace.h. */
2733
2734 int
2735 btrace_is_replaying (struct thread_info *tp)
2736 {
2737 return tp->btrace.replay != NULL;
2738 }
2739
2740 /* See btrace.h. */
2741
2742 int
2743 btrace_is_empty (struct thread_info *tp)
2744 {
2745 struct btrace_insn_iterator begin, end;
2746 struct btrace_thread_info *btinfo;
2747
2748 btinfo = &tp->btrace;
2749
2750 if (btinfo->functions.empty ())
2751 return 1;
2752
2753 btrace_insn_begin (&begin, btinfo);
2754 btrace_insn_end (&end, btinfo);
2755
2756 return btrace_insn_cmp (&begin, &end) == 0;
2757 }
2758
2759 /* Forward the cleanup request. */
2760
2761 static void
2762 do_btrace_data_cleanup (void *arg)
2763 {
2764 btrace_data_fini ((struct btrace_data *) arg);
2765 }
2766
2767 /* See btrace.h. */
2768
2769 struct cleanup *
2770 make_cleanup_btrace_data (struct btrace_data *data)
2771 {
2772 return make_cleanup (do_btrace_data_cleanup, data);
2773 }
2774
2775 #if defined (HAVE_LIBIPT)
2776
2777 /* Print a single packet. */
2778
2779 static void
2780 pt_print_packet (const struct pt_packet *packet)
2781 {
2782 switch (packet->type)
2783 {
2784 default:
2785 printf_unfiltered (("[??: %x]"), packet->type);
2786 break;
2787
2788 case ppt_psb:
2789 printf_unfiltered (("psb"));
2790 break;
2791
2792 case ppt_psbend:
2793 printf_unfiltered (("psbend"));
2794 break;
2795
2796 case ppt_pad:
2797 printf_unfiltered (("pad"));
2798 break;
2799
2800 case ppt_tip:
2801 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2802 packet->payload.ip.ipc,
2803 packet->payload.ip.ip);
2804 break;
2805
2806 case ppt_tip_pge:
2807 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2808 packet->payload.ip.ipc,
2809 packet->payload.ip.ip);
2810 break;
2811
2812 case ppt_tip_pgd:
2813 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2814 packet->payload.ip.ipc,
2815 packet->payload.ip.ip);
2816 break;
2817
2818 case ppt_fup:
2819 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2820 packet->payload.ip.ipc,
2821 packet->payload.ip.ip);
2822 break;
2823
2824 case ppt_tnt_8:
2825 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2826 packet->payload.tnt.bit_size,
2827 packet->payload.tnt.payload);
2828 break;
2829
2830 case ppt_tnt_64:
2831 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2832 packet->payload.tnt.bit_size,
2833 packet->payload.tnt.payload);
2834 break;
2835
2836 case ppt_pip:
2837 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2838 packet->payload.pip.nr ? (" nr") : (""));
2839 break;
2840
2841 case ppt_tsc:
2842 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2843 break;
2844
2845 case ppt_cbr:
2846 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2847 break;
2848
2849 case ppt_mode:
2850 switch (packet->payload.mode.leaf)
2851 {
2852 default:
2853 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2854 break;
2855
2856 case pt_mol_exec:
2857 printf_unfiltered (("mode.exec%s%s"),
2858 packet->payload.mode.bits.exec.csl
2859 ? (" cs.l") : (""),
2860 packet->payload.mode.bits.exec.csd
2861 ? (" cs.d") : (""));
2862 break;
2863
2864 case pt_mol_tsx:
2865 printf_unfiltered (("mode.tsx%s%s"),
2866 packet->payload.mode.bits.tsx.intx
2867 ? (" intx") : (""),
2868 packet->payload.mode.bits.tsx.abrt
2869 ? (" abrt") : (""));
2870 break;
2871 }
2872 break;
2873
2874 case ppt_ovf:
2875 printf_unfiltered (("ovf"));
2876 break;
2877
2878 case ppt_stop:
2879 printf_unfiltered (("stop"));
2880 break;
2881
2882 case ppt_vmcs:
2883 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2884 break;
2885
2886 case ppt_tma:
2887 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2888 packet->payload.tma.fc);
2889 break;
2890
2891 case ppt_mtc:
2892 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2893 break;
2894
2895 case ppt_cyc:
2896 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2897 break;
2898
2899 case ppt_mnt:
2900 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2901 break;
2902 }
2903 }
2904
2905 /* Decode packets into MAINT using DECODER. */
2906
2907 static void
2908 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2909 struct pt_packet_decoder *decoder)
2910 {
2911 int errcode;
2912
2913 for (;;)
2914 {
2915 struct btrace_pt_packet packet;
2916
2917 errcode = pt_pkt_sync_forward (decoder);
2918 if (errcode < 0)
2919 break;
2920
2921 for (;;)
2922 {
2923 pt_pkt_get_offset (decoder, &packet.offset);
2924
2925 errcode = pt_pkt_next (decoder, &packet.packet,
2926 sizeof (packet.packet));
2927 if (errcode < 0)
2928 break;
2929
2930 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2931 {
2932 packet.errcode = pt_errcode (errcode);
2933 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2934 &packet);
2935 }
2936 }
2937
2938 if (errcode == -pte_eos)
2939 break;
2940
2941 packet.errcode = pt_errcode (errcode);
2942 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2943 &packet);
2944
2945 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2946 packet.offset, pt_errstr (packet.errcode));
2947 }
2948
2949 if (errcode != -pte_eos)
2950 warning (_("Failed to synchronize onto the Intel Processor Trace "
2951 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2952 }
2953
2954 /* Update the packet history in BTINFO. */
2955
2956 static void
2957 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2958 {
2960 struct pt_packet_decoder *decoder;
2961 struct btrace_data_pt *pt;
2962 struct pt_config config;
2963 int errcode;
2964
2965 pt = &btinfo->data.variant.pt;
2966
2967 /* Nothing to do if there is no trace. */
2968 if (pt->size == 0)
2969 return;
2970
2971 memset (&config, 0, sizeof (config));
2972
2973 config.size = sizeof (config);
2974 config.begin = pt->data;
2975 config.end = pt->data + pt->size;
2976
2977 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2978 config.cpu.family = pt->config.cpu.family;
2979 config.cpu.model = pt->config.cpu.model;
2980 config.cpu.stepping = pt->config.cpu.stepping;
2981
2982 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2983 if (errcode < 0)
2984 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2985 pt_errstr (pt_errcode (errcode)));
2986
2987 decoder = pt_pkt_alloc_decoder (&config);
2988 if (decoder == NULL)
2989 error (_("Failed to allocate the Intel Processor Trace decoder."));
2990
2991 TRY
2992 {
2993 btrace_maint_decode_pt (&btinfo->maint, decoder);
2994 }
2995 CATCH (except, RETURN_MASK_ALL)
2996 {
2997 pt_pkt_free_decoder (decoder);
2998
2999 if (except.reason < 0)
3000 throw_exception (except);
3001 }
3002 END_CATCH
3003
3004 pt_pkt_free_decoder (decoder);
3005 }
3006
3007 #endif /* defined (HAVE_LIBIPT) */
3008
3009 /* Update the packet maintenance information for BTINFO and store the
3010 low and high bounds into BEGIN and END, respectively.
3011 Store the current iterator state into FROM and TO. */
3012
3013 static void
3014 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3015 unsigned int *begin, unsigned int *end,
3016 unsigned int *from, unsigned int *to)
3017 {
3018 switch (btinfo->data.format)
3019 {
3020 default:
3021 *begin = 0;
3022 *end = 0;
3023 *from = 0;
3024 *to = 0;
3025 break;
3026
3027 case BTRACE_FORMAT_BTS:
3028 /* Nothing to do - we operate directly on BTINFO->DATA. */
3029 *begin = 0;
3030 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3031 *from = btinfo->maint.variant.bts.packet_history.begin;
3032 *to = btinfo->maint.variant.bts.packet_history.end;
3033 break;
3034
3035 #if defined (HAVE_LIBIPT)
3036 case BTRACE_FORMAT_PT:
3037 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3038 btrace_maint_update_pt_packets (btinfo);
3039
3040 *begin = 0;
3041 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3042 *from = btinfo->maint.variant.pt.packet_history.begin;
3043 *to = btinfo->maint.variant.pt.packet_history.end;
3044 break;
3045 #endif /* defined (HAVE_LIBIPT) */
3046 }
3047 }
3048
3049 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3050 update the current iterator position. */
3051
3052 static void
3053 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3054 unsigned int begin, unsigned int end)
3055 {
3056 switch (btinfo->data.format)
3057 {
3058 default:
3059 break;
3060
3061 case BTRACE_FORMAT_BTS:
3062 {
3063 VEC (btrace_block_s) *blocks;
3064 unsigned int blk;
3065
3066 blocks = btinfo->data.variant.bts.blocks;
3067 for (blk = begin; blk < end; ++blk)
3068 {
3069 const btrace_block_s *block;
3070
3071 block = VEC_index (btrace_block_s, blocks, blk);
3072
3073 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3074 core_addr_to_string_nz (block->begin),
3075 core_addr_to_string_nz (block->end));
3076 }
3077
3078 btinfo->maint.variant.bts.packet_history.begin = begin;
3079 btinfo->maint.variant.bts.packet_history.end = end;
3080 }
3081 break;
3082
3083 #if defined (HAVE_LIBIPT)
3084 case BTRACE_FORMAT_PT:
3085 {
3086 VEC (btrace_pt_packet_s) *packets;
3087 unsigned int pkt;
3088
3089 packets = btinfo->maint.variant.pt.packets;
3090 for (pkt = begin; pkt < end; ++pkt)
3091 {
3092 const struct btrace_pt_packet *packet;
3093
3094 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3095
3096 printf_unfiltered ("%u\t", pkt);
3097 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3098
3099 if (packet->errcode == pte_ok)
3100 pt_print_packet (&packet->packet);
3101 else
3102 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3103
3104 printf_unfiltered ("\n");
3105 }
3106
3107 btinfo->maint.variant.pt.packet_history.begin = begin;
3108 btinfo->maint.variant.pt.packet_history.end = end;
3109 }
3110 break;
3111 #endif /* defined (HAVE_LIBIPT) */
3112 }
3113 }
3114
3115 /* Read a number from an argument string. */
3116
3117 static unsigned int
3118 get_uint (char **arg)
3119 {
3120 char *begin, *end, *pos;
3121 unsigned long number;
3122
3123 begin = *arg;
3124 pos = skip_spaces (begin);
3125
3126 if (!isdigit (*pos))
3127 error (_("Expected positive number, got: %s."), pos);
3128
3129 number = strtoul (pos, &end, 10);
3130 if (number > UINT_MAX)
3131 error (_("Number too big."));
3132
3133 *arg += (end - begin);
3134
3135 return (unsigned int) number;
3136 }
3137
3138 /* Read a context size from an argument string. */
3139
3140 static int
3141 get_context_size (char **arg)
3142 {
3143 char *pos;
3145
3146 pos = skip_spaces (*arg);
3147
3148 if (!isdigit (*pos))
3149 error (_("Expected positive number, got: %s."), pos);
3150
3151 return strtol (pos, arg, 10);
3152 }
3153
3154 /* Complain about junk at the end of an argument string. */
3155
3156 static void
3157 no_chunk (char *arg)
3158 {
3159 if (*arg != 0)
3160 error (_("Junk after argument: %s."), arg);
3161 }
3162
3163 /* The "maintenance btrace packet-history" command. */
3164
3165 static void
3166 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3167 {
3168 struct btrace_thread_info *btinfo;
3169 struct thread_info *tp;
3170 unsigned int size, begin, end, from, to;
3171
3172 tp = find_thread_ptid (inferior_ptid);
3173 if (tp == NULL)
3174 error (_("No thread."));
3175
3176 size = 10;
3177 btinfo = &tp->btrace;
3178
3179 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3180 if (begin == end)
3181 {
3182 printf_unfiltered (_("No trace.\n"));
3183 return;
3184 }
3185
3186 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3187 {
3188 from = to;
3189
3190 if (end - from < size)
3191 size = end - from;
3192 to = from + size;
3193 }
3194 else if (strcmp (arg, "-") == 0)
3195 {
3196 to = from;
3197
3198 if (to - begin < size)
3199 size = to - begin;
3200 from = to - size;
3201 }
3202 else
3203 {
3204 from = get_uint (&arg);
3205 if (end <= from)
3206 error (_("'%u' is out of range."), from);
3207
3208 arg = skip_spaces (arg);
3209 if (*arg == ',')
3210 {
3211 arg = skip_spaces (++arg);
3212
3213 if (*arg == '+')
3214 {
3215 arg += 1;
3216 size = get_context_size (&arg);
3217
3218 no_chunk (arg);
3219
3220 if (end - from < size)
3221 size = end - from;
3222 to = from + size;
3223 }
3224 else if (*arg == '-')
3225 {
3226 arg += 1;
3227 size = get_context_size (&arg);
3228
3229 no_chunk (arg);
3230
3231 /* Include the packet given as the first argument. */
3232 from += 1;
3233 to = from;
3234
3235 if (to - begin < size)
3236 size = to - begin;
3237 from = to - size;
3238 }
3239 else
3240 {
3241 to = get_uint (&arg);
3242
3243 /* Include the packet at the second argument and silently
3244 truncate the range. */
3245 if (to < end)
3246 to += 1;
3247 else
3248 to = end;
3249
3250 no_chunk (arg);
3251 }
3252 }
3253 else
3254 {
3255 no_chunk (arg);
3256
3257 if (end - from < size)
3258 size = end - from;
3259 to = from + size;
3260 }
3261
3262 dont_repeat ();
3263 }
3264
3265 btrace_maint_print_packets (btinfo, from, to);
3266 }
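/* For example (illustrative invocations, parsed by the code above):

     (gdb) maint btrace packet-history          next ten packets
     (gdb) maint btrace packet-history -        previous ten packets
     (gdb) maint btrace packet-history 42       ten packets starting at 42
     (gdb) maint btrace packet-history 42,50    packets 42 to 50, inclusive
     (gdb) maint btrace packet-history 42,+5    five packets starting at 42
     (gdb) maint btrace packet-history 42,-5    five packets ending at 42  */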
3267
3268 /* The "maintenance btrace clear-packet-history" command. */
3269
3270 static void
3271 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3272 {
3273 struct btrace_thread_info *btinfo;
3274 struct thread_info *tp;
3275
3276 if (args != NULL && *args != 0)
3277 error (_("Invalid argument."));
3278
3279 tp = find_thread_ptid (inferior_ptid);
3280 if (tp == NULL)
3281 error (_("No thread."));
3282
3283 btinfo = &tp->btrace;
3284
3285 /* The maint data must be cleared first - it depends on BTINFO->DATA. */
3286 btrace_maint_clear (btinfo);
3287 btrace_data_clear (&btinfo->data);
3288 }
3289
3290 /* The "maintenance btrace clear" command. */
3291
3292 static void
3293 maint_btrace_clear_cmd (char *args, int from_tty)
3294 {
3295 struct btrace_thread_info *btinfo;
3296 struct thread_info *tp;
3297
3298 if (args != NULL && *args != 0)
3299 error (_("Invalid argument."));
3300
3301 tp = find_thread_ptid (inferior_ptid);
3302 if (tp == NULL)
3303 error (_("No thread."));
3304
3305 btrace_clear (tp);
3306 }
3307
3308 /* The "maintenance btrace" command. */
3309
3310 static void
3311 maint_btrace_cmd (char *args, int from_tty)
3312 {
3313 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3314 gdb_stdout);
3315 }
3316
3317 /* The "maintenance set btrace" command. */
3318
3319 static void
3320 maint_btrace_set_cmd (char *args, int from_tty)
3321 {
3322 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3323 gdb_stdout);
3324 }
3325
3326 /* The "maintenance show btrace" command. */
3327
3328 static void
3329 maint_btrace_show_cmd (char *args, int from_tty)
3330 {
3331 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3332 all_commands, gdb_stdout);
3333 }
3334
3335 /* The "maintenance set btrace pt" command. */
3336
3337 static void
3338 maint_btrace_pt_set_cmd (char *args, int from_tty)
3339 {
3340 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3341 all_commands, gdb_stdout);
3342 }
3343
3344 /* The "maintenance show btrace pt" command. */
3345
3346 static void
3347 maint_btrace_pt_show_cmd (char *args, int from_tty)
3348 {
3349 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3350 all_commands, gdb_stdout);
3351 }
3352
3353 /* The "maintenance info btrace" command. */
3354
3355 static void
3356 maint_info_btrace_cmd (char *args, int from_tty)
3357 {
3358 struct btrace_thread_info *btinfo;
3359 struct thread_info *tp;
3360 const struct btrace_config *conf;
3361
3362 if (args != NULL && *args != 0)
3363 error (_("Invalid argument."));
3364
3365 tp = find_thread_ptid (inferior_ptid);
3366 if (tp == NULL)
3367 error (_("No thread."));
3368
3369 btinfo = &tp->btrace;
3370
3371 conf = btrace_conf (btinfo);
3372 if (conf == NULL)
3373 error (_("No btrace configuration."));
3374
3375 printf_unfiltered (_("Format: %s.\n"),
3376 btrace_format_string (conf->format));
3377
3378 switch (conf->format)
3379 {
3380 default:
3381 break;
3382
3383 case BTRACE_FORMAT_BTS:
3384 printf_unfiltered (_("Number of packets: %u.\n"),
3385 VEC_length (btrace_block_s,
3386 btinfo->data.variant.bts.blocks));
3387 break;
3388
3389 #if defined (HAVE_LIBIPT)
3390 case BTRACE_FORMAT_PT:
3391 {
3392 struct pt_version version;
3393
3394 version = pt_library_version ();
3395 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3396 version.minor, version.build,
3397 version.ext != NULL ? version.ext : "");
3398
3399 btrace_maint_update_pt_packets (btinfo);
3400 printf_unfiltered (_("Number of packets: %u.\n"),
3401 VEC_length (btrace_pt_packet_s,
3402 btinfo->maint.variant.pt.packets));
3403 }
3404 break;
3405 #endif /* defined (HAVE_LIBIPT) */
3406 }
3407 }
3408
3409 /* The "maint show btrace pt skip-pad" show value function. */
3410
3411 static void
3412 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3413 struct cmd_list_element *c,
3414 const char *value)
3415 {
3416 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3417 }
3418
3419
3420 /* Initialize btrace maintenance commands. */
3421
3422 void _initialize_btrace (void);
3423 void
3424 _initialize_btrace (void)
3425 {
3426 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3427 _("Info about branch tracing data."), &maintenanceinfolist);
3428
3429 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3430 _("Branch tracing maintenance commands."),
3431 &maint_btrace_cmdlist, "maintenance btrace ",
3432 0, &maintenancelist);
3433
3434 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3435 Set branch tracing specific variables."),
3436 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3437 0, &maintenance_set_cmdlist);
3438
3439 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3440 Set Intel Processor Trace specific variables."),
3441 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3442 0, &maint_btrace_set_cmdlist);
3443
3444 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3445 Show branch tracing specific variables."),
3446 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3447 0, &maintenance_show_cmdlist);
3448
3449 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3450 Show Intel Processor Trace specific variables."),
3451 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3452 0, &maint_btrace_show_cmdlist);
3453
3454 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3455 &maint_btrace_pt_skip_pad, _("\
3456 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3457 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3458 When enabled, PAD packets are ignored in the btrace packet history."),
3459 NULL, show_maint_btrace_pt_skip_pad,
3460 &maint_btrace_pt_set_cmdlist,
3461 &maint_btrace_pt_show_cmdlist);
3462
3463 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3464 _("Print the raw branch tracing data.\n\
3465 With no argument, print ten more packets after the previous ten-line print.\n\
3466 With '-' as argument print ten packets before a previous ten-line print.\n\
3467 One argument specifies the starting packet of a ten-line print.\n\
3468 Two arguments with comma between specify starting and ending packets to \
3469 print.\n\
3470 Preceded with '+'/'-' the second argument specifies the distance from the \
3471 first.\n"),
3472 &maint_btrace_cmdlist);
3473
3474 add_cmd ("clear-packet-history", class_maintenance,
3475 maint_btrace_clear_packet_history_cmd,
3476 _("Clears the branch tracing packet history.\n\
3477 Discards the raw branch tracing data but not the execution history data.\n\
3478 "),
3479 &maint_btrace_cmdlist);
3480
3481 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3482 _("Clears the branch tracing data.\n\
3483 Discards the raw branch tracing data and the execution history data.\n\
3484 The next 'record' command will fetch the branch tracing data anew.\n\
3485 "),
3486 &maint_btrace_cmdlist);
3487
3488 }