config, btrace: check for pt_insn_event in libipt
[deliverable/binutils-gdb.git] / gdb / btrace.c
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "btrace.h"
24#include "gdbthread.h"
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
32#include "xml-support.h"
33#include "regcache.h"
34#include "rsp-low.h"
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
37
38#include <inttypes.h>
39#include <ctype.h>
40#include <algorithm>
41
42/* Command lists for btrace maintenance commands. */
43static struct cmd_list_element *maint_btrace_cmdlist;
44static struct cmd_list_element *maint_btrace_set_cmdlist;
45static struct cmd_list_element *maint_btrace_show_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49/* Control whether to skip PAD packets when computing the packet history. */
50static int maint_btrace_pt_skip_pad = 1;
51
52static void btrace_add_pc (struct thread_info *tp);
53
54/* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
56
57#define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
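/* For exposition only: the do ... while (0) wrapper above keeps an
   unbraced (hypothetical) use like the following well-formed.  Without
   it, the `else' would bind to the `if' hidden inside the expansion.  */
#if 0
static void
debug_example (int have_trace)
{
  if (have_trace)
    DEBUG ("got trace for thread %d", 42);
  else
    DEBUG ("no trace");
}
#endif
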
68/* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71static const char *
72ftrace_print_function_name (const struct btrace_function *bfun)
73{
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87}
88
89/* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92static const char *
93ftrace_print_filename (const struct btrace_function *bfun)
94{
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106}
107
108/* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111static const char *
112ftrace_print_insn_addr (const struct btrace_insn *insn)
113{
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118}
119
120/* Print an ftrace debug status message. */
121
122static void
123ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124{
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138}
139
140/* Return the number of instructions in a given function call segment. */
141
142static unsigned int
143 ftrace_call_num_insn (const struct btrace_function *bfun)
144{
145 if (bfun == NULL)
146 return 0;
147
148 /* A gap is always counted as one instruction. */
149 if (bfun->errcode != 0)
150 return 1;
151
152 return VEC_length (btrace_insn_s, bfun->insn);
153}
154
155/* Return the function segment with the given NUMBER or NULL if no such segment
156 exists. BTINFO is the branch trace information for the current thread. */
157
158static struct btrace_function *
159ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
160 unsigned int number)
161{
162 if (number == 0 || number > btinfo->functions.size ())
163 return NULL;
164
165 return &btinfo->functions[number - 1];
166}
167
168/* A const version of the function above. */
169
170static const struct btrace_function *
171ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
172 unsigned int number)
173{
174 if (number == 0 || number > btinfo->functions.size ())
175 return NULL;
176
177 return &btinfo->functions[number - 1];
178}
179
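/* Illustrative sketch (not part of GDB): function segments are numbered
   starting at one, so a complete forward walk over the recorded trace
   looks like this.  */
#if 0
static void
example_walk_segments (struct btrace_thread_info *btinfo)
{
  for (unsigned int number = 1; ; ++number)
    {
      struct btrace_function *bfun
        = ftrace_find_call_by_number (btinfo, number);

      if (bfun == NULL)
        break;

      /* ... process BFUN ...  */
    }
}
#endif
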
180/* Return non-zero if BFUN does not match MFUN and FUN,
181 return zero otherwise. */
182
183static int
184ftrace_function_switched (const struct btrace_function *bfun,
185 const struct minimal_symbol *mfun,
186 const struct symbol *fun)
187{
188 struct minimal_symbol *msym;
189 struct symbol *sym;
190
191 msym = bfun->msym;
192 sym = bfun->sym;
193
194 /* If the minimal symbol changed, we certainly switched functions. */
195 if (mfun != NULL && msym != NULL
196 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
197 return 1;
198
199 /* If the symbol changed, we certainly switched functions. */
200 if (fun != NULL && sym != NULL)
201 {
202 const char *bfname, *fname;
203
204 /* Check the function name. */
205 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
206 return 1;
207
208 /* Check the location of those functions, as well. */
209 bfname = symtab_to_fullname (symbol_symtab (sym));
210 fname = symtab_to_fullname (symbol_symtab (fun));
211 if (filename_cmp (fname, bfname) != 0)
212 return 1;
213 }
214
215 /* If we lost symbol information, we switched functions. */
216 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
217 return 1;
218
219 /* If we gained symbol information, we switched functions. */
220 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
221 return 1;
222
223 return 0;
224}
225
226/* Allocate and initialize a new branch trace function segment at the end of
227 the trace.
228 BTINFO is the branch trace information for the current thread.
229 MFUN and FUN are the symbol information we have for this function.
230 This invalidates all struct btrace_function pointers currently held. */
231
232static struct btrace_function *
233ftrace_new_function (struct btrace_thread_info *btinfo,
234 struct minimal_symbol *mfun,
235 struct symbol *fun)
236{
237 int level;
238 unsigned int number, insn_offset;
239
240 if (btinfo->functions.empty ())
241 {
242 /* Start counting NUMBER and INSN_OFFSET at one. */
243 level = 0;
244 number = 1;
245 insn_offset = 1;
246 }
247 else
248 {
249 const struct btrace_function *prev = &btinfo->functions.back ();
250 level = prev->level;
251 number = prev->number + 1;
252 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
253 }
254
255 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
256 return &btinfo->functions.back ();
257}
258
259/* Update the UP field of a function segment. */
260
261static void
262ftrace_update_caller (struct btrace_function *bfun,
263 struct btrace_function *caller,
264 enum btrace_function_flag flags)
265{
266 if (bfun->up != 0)
267 ftrace_debug (bfun, "updating caller");
268
269 bfun->up = caller->number;
270 bfun->flags = flags;
271
272 ftrace_debug (bfun, "set caller");
273 ftrace_debug (caller, "..to");
274}
275
276/* Fix up the caller for all segments of a function. */
277
278static void
279ftrace_fixup_caller (struct btrace_thread_info *btinfo,
280 struct btrace_function *bfun,
281 struct btrace_function *caller,
282 enum btrace_function_flag flags)
283{
284 unsigned int prev, next;
285
286 prev = bfun->prev;
287 next = bfun->next;
288 ftrace_update_caller (bfun, caller, flags);
289
290 /* Update all function segments belonging to the same function. */
291 for (; prev != 0; prev = bfun->prev)
292 {
293 bfun = ftrace_find_call_by_number (btinfo, prev);
294 ftrace_update_caller (bfun, caller, flags);
295 }
296
297 for (; next != 0; next = bfun->next)
298 {
299 bfun = ftrace_find_call_by_number (btinfo, next);
300 ftrace_update_caller (bfun, caller, flags);
301 }
302}
303
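/* Example (exposition only): if segments 3, 7, and 9 are instances of the
   same function connected through their PREV and NEXT fields, calling
   ftrace_fixup_caller on any one of them updates the UP link and flags of
   all three.  */
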
304/* Add a new function segment for a call at the end of the trace.
305 BTINFO is the branch trace information for the current thread.
306 MFUN and FUN are the symbol information we have for this function. */
307
308static struct btrace_function *
309ftrace_new_call (struct btrace_thread_info *btinfo,
310 struct minimal_symbol *mfun,
311 struct symbol *fun)
312{
313 const unsigned int length = btinfo->functions.size ();
314 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
315
316 bfun->up = length;
317 bfun->level += 1;
318
319 ftrace_debug (bfun, "new call");
320
321 return bfun;
322}
323
324/* Add a new function segment for a tail call at the end of the trace.
325 BTINFO is the branch trace information for the current thread.
326 MFUN and FUN are the symbol information we have for this function. */
327
328static struct btrace_function *
329ftrace_new_tailcall (struct btrace_thread_info *btinfo,
330 struct minimal_symbol *mfun,
331 struct symbol *fun)
332{
333 const unsigned int length = btinfo->functions.size ();
334 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
335
336 bfun->up = length;
337 bfun->level += 1;
338 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
339
340 ftrace_debug (bfun, "new tail call");
341
342 return bfun;
343}
344
345/* Return the caller of BFUN or NULL if there is none. This function skips
346 tail calls in the call chain. BTINFO is the branch trace information for
347 the current thread. */
348static struct btrace_function *
349ftrace_get_caller (struct btrace_thread_info *btinfo,
350 struct btrace_function *bfun)
351{
352 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
353 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
354 return ftrace_find_call_by_number (btinfo, bfun->up);
355
356 return NULL;
357}
358
359/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
360 symbol information. BTINFO is the branch trace information for the current
361 thread. */
362
363static struct btrace_function *
364ftrace_find_caller (struct btrace_thread_info *btinfo,
365 struct btrace_function *bfun,
366 struct minimal_symbol *mfun,
367 struct symbol *fun)
368{
369 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
370 {
371 /* Skip functions with incompatible symbol information. */
372 if (ftrace_function_switched (bfun, mfun, fun))
373 continue;
374
375 /* This is the function segment we're looking for. */
376 break;
377 }
378
379 return bfun;
380}
381
382/* Find the innermost caller in the back trace of BFUN, skipping all
383 function segments that do not end with a call instruction (e.g.
384 tail calls ending with a jump). BTINFO is the branch trace information for
385 the current thread. */
386
387static struct btrace_function *
388ftrace_find_call (struct btrace_thread_info *btinfo,
389 struct btrace_function *bfun)
390{
391 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
392 {
393 struct btrace_insn *last;
394
395 /* Skip gaps. */
396 if (bfun->errcode != 0)
397 continue;
398
399 last = VEC_last (btrace_insn_s, bfun->insn);
400
401 if (last->iclass == BTRACE_INSN_CALL)
402 break;
403 }
404
405 return bfun;
406}
407
408/* Add a continuation segment for a function into which we return at the end of
409 the trace.
410 BTINFO is the branch trace information for the current thread.
411 MFUN and FUN are the symbol information we have for this function. */
412
413static struct btrace_function *
414ftrace_new_return (struct btrace_thread_info *btinfo,
415 struct minimal_symbol *mfun,
416 struct symbol *fun)
417{
418 struct btrace_function *prev, *bfun, *caller;
419
420 bfun = ftrace_new_function (btinfo, mfun, fun);
421 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
422
423 /* It is important to start at PREV's caller. Otherwise, we might find
424 PREV itself, if PREV is a recursive function. */
425 caller = ftrace_find_call_by_number (btinfo, prev->up);
426 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
427 if (caller != NULL)
428 {
429 /* The caller of PREV is the preceding btrace function segment in this
430 function instance. */
431 gdb_assert (caller->next == 0);
432
433 caller->next = bfun->number;
434 bfun->prev = caller->number;
435
436 /* Maintain the function level. */
437 bfun->level = caller->level;
438
439 /* Maintain the call stack. */
440 bfun->up = caller->up;
441 bfun->flags = caller->flags;
442
443 ftrace_debug (bfun, "new return");
444 }
445 else
446 {
447 /* We did not find a caller. This could mean that something went
448 wrong or that the call is simply not included in the trace. */
449
450 /* Let's search for some actual call. */
451 caller = ftrace_find_call_by_number (btinfo, prev->up);
452 caller = ftrace_find_call (btinfo, caller);
453 if (caller == NULL)
454 {
455 /* There is no call in PREV's back trace. We assume that the
456 branch trace did not include it. */
457
458 /* Let's find the topmost function and add a new caller for it.
459 This should handle a series of initial tail calls. */
460 while (prev->up != 0)
461 prev = ftrace_find_call_by_number (btinfo, prev->up);
462
463 bfun->level = prev->level - 1;
464
465 /* Fix up the call stack for PREV. */
466 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
467
468 ftrace_debug (bfun, "new return - no caller");
469 }
470 else
471 {
472 /* There is a call in PREV's back trace to which we should have
473 returned but didn't. Let's start a new, separate back trace
474 from PREV's level. */
475 bfun->level = prev->level - 1;
476
477 /* We fix up the back trace for PREV but leave other function segments
478 on the same level as they are.
479 This should handle things like schedule () correctly where we're
480 switching contexts. */
481 prev->up = bfun->number;
482 prev->flags = BFUN_UP_LINKS_TO_RET;
483
484 ftrace_debug (bfun, "new return - unknown caller");
485 }
486 }
487
488 return bfun;
489}
490
491/* Add a new function segment for a function switch at the end of the trace.
492 BTINFO is the branch trace information for the current thread.
493 MFUN and FUN are the symbol information we have for this function. */
494
495static struct btrace_function *
496ftrace_new_switch (struct btrace_thread_info *btinfo,
497 struct minimal_symbol *mfun,
498 struct symbol *fun)
499{
500 struct btrace_function *prev, *bfun;
501
502 /* This is an unexplained function switch. We can't really be sure about the
503 call stack, yet the best I can think of right now is to preserve it. */
504 bfun = ftrace_new_function (btinfo, mfun, fun);
505 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
506 bfun->up = prev->up;
507 bfun->flags = prev->flags;
508
509 ftrace_debug (bfun, "new switch");
510
511 return bfun;
512}
513
514/* Add a new function segment for a gap in the trace due to a decode error at
515 the end of the trace.
516 BTINFO is the branch trace information for the current thread.
517 ERRCODE is the format-specific error code. */
518
519static struct btrace_function *
520ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
521 std::vector<unsigned int> &gaps)
522{
523 struct btrace_function *bfun;
524
525 if (btinfo->functions.empty ())
526 bfun = ftrace_new_function (btinfo, NULL, NULL);
527 else
528 {
529 /* We hijack the previous function segment if it was empty. */
530 bfun = &btinfo->functions.back ();
531 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
532 bfun = ftrace_new_function (btinfo, NULL, NULL);
533 }
534
535 bfun->errcode = errcode;
536 gaps.push_back (bfun->number);
537
538 ftrace_debug (bfun, "new gap");
539
540 return bfun;
541}
542
543/* Update the current function segment at the end of the trace in BTINFO with
544 respect to the instruction at PC. This may create new function segments.
545 Return the chronologically latest function segment, never NULL. */
546
547static struct btrace_function *
548ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
549{
550 struct bound_minimal_symbol bmfun;
551 struct minimal_symbol *mfun;
552 struct symbol *fun;
553 struct btrace_insn *last;
554 struct btrace_function *bfun;
555
556 /* Try to determine the function we're in. We use both types of symbols
557 to avoid surprises when we sometimes get a full symbol and sometimes
558 only a minimal symbol. */
559 fun = find_pc_function (pc);
560 bmfun = lookup_minimal_symbol_by_pc (pc);
561 mfun = bmfun.minsym;
562
563 if (fun == NULL && mfun == NULL)
564 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
565
566 /* If we didn't have a function, we create one. */
567 if (btinfo->functions.empty ())
568 return ftrace_new_function (btinfo, mfun, fun);
569
570 /* If we had a gap before, we create a function. */
571 bfun = &btinfo->functions.back ();
572 if (bfun->errcode != 0)
573 return ftrace_new_function (btinfo, mfun, fun);
574
575 /* Check the last instruction, if we have one.
576 We do this check first, since it allows us to fill in the call stack
577 links in addition to the normal flow links. */
578 last = NULL;
579 if (!VEC_empty (btrace_insn_s, bfun->insn))
580 last = VEC_last (btrace_insn_s, bfun->insn);
581
582 if (last != NULL)
583 {
584 switch (last->iclass)
585 {
586 case BTRACE_INSN_RETURN:
587 {
588 const char *fname;
589
590 /* On some systems, _dl_runtime_resolve returns to the resolved
591 function instead of jumping to it. From our perspective,
592 however, this is a tailcall.
593 If we treated it as return, we wouldn't be able to find the
594 resolved function in our stack back trace. Hence, we would
595 lose the current stack back trace and start anew with an empty
596 back trace. When the resolved function returns, we would then
597 create a stack back trace with the same function names but
598 different frame id's. This will confuse stepping. */
599 fname = ftrace_print_function_name (bfun);
600 if (strcmp (fname, "_dl_runtime_resolve") == 0)
601 return ftrace_new_tailcall (btinfo, mfun, fun);
602
603 return ftrace_new_return (btinfo, mfun, fun);
604 }
605
606 case BTRACE_INSN_CALL:
607 /* Ignore calls to the next instruction. They are used for PIC. */
608 if (last->pc + last->size == pc)
609 break;
610
611 return ftrace_new_call (btinfo, mfun, fun);
612
613 case BTRACE_INSN_JUMP:
614 {
615 CORE_ADDR start;
616
617 start = get_pc_function_start (pc);
618
619 /* A jump to the start of a function is (typically) a tail call. */
620 if (start == pc)
621 return ftrace_new_tailcall (btinfo, mfun, fun);
622
623 /* If we can't determine the function for PC, we treat a jump at
624 the end of the block as tail call if we're switching functions
625 and as an intra-function branch if we don't. */
626 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
627 return ftrace_new_tailcall (btinfo, mfun, fun);
628
629 break;
630 }
631 }
632 }
633
634 /* Check if we're switching functions for some other reason. */
635 if (ftrace_function_switched (bfun, mfun, fun))
636 {
637 DEBUG_FTRACE ("switching from %s in %s at %s",
638 ftrace_print_insn_addr (last),
639 ftrace_print_function_name (bfun),
640 ftrace_print_filename (bfun));
641
642 return ftrace_new_switch (btinfo, mfun, fun);
643 }
644
645 return bfun;
646}
647
648/* Add the instruction at PC to BFUN's instructions. */
649
650static void
651ftrace_update_insns (struct btrace_function *bfun,
652 const struct btrace_insn *insn)
653{
654 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
655
656 if (record_debug > 1)
657 ftrace_debug (bfun, "update insn");
658}
659
660/* Classify the instruction at PC. */
661
662static enum btrace_insn_class
663ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
664{
665 enum btrace_insn_class iclass;
666
667 iclass = BTRACE_INSN_OTHER;
668 TRY
669 {
670 if (gdbarch_insn_is_call (gdbarch, pc))
671 iclass = BTRACE_INSN_CALL;
672 else if (gdbarch_insn_is_ret (gdbarch, pc))
673 iclass = BTRACE_INSN_RETURN;
674 else if (gdbarch_insn_is_jump (gdbarch, pc))
675 iclass = BTRACE_INSN_JUMP;
676 }
677 CATCH (error, RETURN_MASK_ERROR)
678 {
679 }
680 END_CATCH
681
682 return iclass;
683}
684
685/* Try to match the back trace at LHS to the back trace at RHS. Returns the
686 number of matching function segments or zero if the back traces do not
687 match. BTINFO is the branch trace information for the current thread. */
688
689static int
690ftrace_match_backtrace (struct btrace_thread_info *btinfo,
691 struct btrace_function *lhs,
692 struct btrace_function *rhs)
693{
694 int matches;
695
696 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
697 {
698 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
699 return 0;
700
701 lhs = ftrace_get_caller (btinfo, lhs);
702 rhs = ftrace_get_caller (btinfo, rhs);
703 }
704
705 return matches;
706}
707
708/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
709 BTINFO is the branch trace information for the current thread. */
710
711static void
712ftrace_fixup_level (struct btrace_thread_info *btinfo,
713 struct btrace_function *bfun, int adjustment)
714{
715 if (adjustment == 0)
716 return;
717
718 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
719 ftrace_debug (bfun, "..bfun");
720
721 while (bfun != NULL)
722 {
723 bfun->level += adjustment;
724 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
725 }
726}
727
728/* Recompute the global level offset. Traverse the function trace and compute
729 the global level offset as the negative of the minimal function level. */
730
731static void
732ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
733{
734 int level = INT_MAX;
735
736 if (btinfo == NULL)
737 return;
738
739 if (btinfo->functions.empty ())
740 return;
741
742 unsigned int length = btinfo->functions.size () - 1;
743 for (unsigned int i = 0; i < length; ++i)
744 level = std::min (level, btinfo->functions[i].level);
745
746 /* The last function segment contains the current instruction, which is not
747 really part of the trace. If it contains just this one instruction, we
748 ignore the segment. */
749 struct btrace_function *last = &btinfo->functions.back ();
750 if (VEC_length (btrace_insn_s, last->insn) != 1)
751 level = std::min (level, last->level);
752
753 DEBUG_FTRACE ("setting global level offset: %d", -level);
754 btinfo->level = -level;
755}
756
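/* Example (exposition only): if the segment levels in the trace are
   { -2, -1, -1, 0 }, the minimal level is -2 and BTINFO->LEVEL becomes 2,
   so the normalized level (segment level + BTINFO->LEVEL) starts at
   zero.  */
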
757/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
758 ftrace_connect_backtrace. BTINFO is the branch trace information for the
759 current thread. */
760
761static void
762ftrace_connect_bfun (struct btrace_thread_info *btinfo,
763 struct btrace_function *prev,
764 struct btrace_function *next)
765{
766 DEBUG_FTRACE ("connecting...");
767 ftrace_debug (prev, "..prev");
768 ftrace_debug (next, "..next");
769
770 /* The function segments are not yet connected. */
771 gdb_assert (prev->next == 0);
772 gdb_assert (next->prev == 0);
773
774 prev->next = next->number;
775 next->prev = prev->number;
776
777 /* We may have moved NEXT to a different function level. */
778 ftrace_fixup_level (btinfo, next, prev->level - next->level);
779
780 /* If we run out of back trace for one, let's use the other's. */
781 if (prev->up == 0)
782 {
783 const btrace_function_flags flags = next->flags;
784
785 next = ftrace_find_call_by_number (btinfo, next->up);
786 if (next != NULL)
787 {
788 DEBUG_FTRACE ("using next's callers");
789 ftrace_fixup_caller (btinfo, prev, next, flags);
790 }
791 }
792 else if (next->up == 0)
793 {
794 const btrace_function_flags flags = prev->flags;
795
796 prev = ftrace_find_call_by_number (btinfo, prev->up);
797 if (prev != NULL)
798 {
799 DEBUG_FTRACE ("using prev's callers");
800 ftrace_fixup_caller (btinfo, next, prev, flags);
801 }
802 }
803 else
804 {
805 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
806 link to add the tail callers to NEXT's back trace.
807
808 This removes NEXT->UP from NEXT's back trace. It will be added back
809 when connecting NEXT and PREV's callers - provided they exist.
810
811 If PREV's back trace consists of a series of tail calls without an
812 actual call, there will be no further connection and NEXT's caller will
813 be removed for good. To catch this case, we handle it here and connect
814 the top of PREV's back trace to NEXT's caller. */
815 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
816 {
817 struct btrace_function *caller;
818 btrace_function_flags next_flags, prev_flags;
819
820 /* We checked NEXT->UP above so CALLER can't be NULL. */
821 caller = ftrace_find_call_by_number (btinfo, next->up);
822 next_flags = next->flags;
823 prev_flags = prev->flags;
824
825 DEBUG_FTRACE ("adding prev's tail calls to next");
826
827 prev = ftrace_find_call_by_number (btinfo, prev->up);
828 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
829
830 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
831 prev->up))
832 {
833 /* At the end of PREV's back trace, continue with CALLER. */
834 if (prev->up == 0)
835 {
836 DEBUG_FTRACE ("fixing up link for tailcall chain");
837 ftrace_debug (prev, "..top");
838 ftrace_debug (caller, "..up");
839
840 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
841
842 /* If we skipped any tail calls, this may move CALLER to a
843 different function level.
844
845 Note that changing CALLER's level is only OK because we
846 know that this is the last iteration of the bottom-to-top
847 walk in ftrace_connect_backtrace.
848
849 Otherwise we will fix up CALLER's level when we connect it
850 to PREV's caller in the next iteration. */
851 ftrace_fixup_level (btinfo, caller,
852 prev->level - caller->level - 1);
853 break;
854 }
855
856 /* There's nothing to do if we find a real call. */
857 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
858 {
859 DEBUG_FTRACE ("will fix up link in next iteration");
860 break;
861 }
862 }
863 }
864 }
865}
866
867/* Connect function segments on the same level in the back trace at LHS and RHS.
868 The back traces at LHS and RHS are expected to match according to
869 ftrace_match_backtrace. BTINFO is the branch trace information for the
870 current thread. */
871
872static void
873ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
874 struct btrace_function *lhs,
875 struct btrace_function *rhs)
876{
877 while (lhs != NULL && rhs != NULL)
878 {
879 struct btrace_function *prev, *next;
880
881 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
882
883 /* Connecting LHS and RHS may change the up link. */
884 prev = lhs;
885 next = rhs;
886
887 lhs = ftrace_get_caller (btinfo, lhs);
888 rhs = ftrace_get_caller (btinfo, rhs);
889
890 ftrace_connect_bfun (btinfo, prev, next);
891 }
892}
893
894/* Bridge the gap between two function segments left and right of a gap if their
895 respective back traces match in at least MIN_MATCHES functions. BTINFO is
896 the branch trace information for the current thread.
897
898 Returns non-zero if the gap could be bridged, zero otherwise. */
899
900static int
901ftrace_bridge_gap (struct btrace_thread_info *btinfo,
902 struct btrace_function *lhs, struct btrace_function *rhs,
903 int min_matches)
904{
905 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
906 int best_matches;
907
908 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
909 rhs->insn_offset - 1, min_matches);
910
911 best_matches = 0;
912 best_l = NULL;
913 best_r = NULL;
914
915 /* We search the back traces of LHS and RHS for valid connections and connect
916 the two function segments that give the longest combined back trace. */
917
918 for (cand_l = lhs; cand_l != NULL;
919 cand_l = ftrace_get_caller (btinfo, cand_l))
920 for (cand_r = rhs; cand_r != NULL;
921 cand_r = ftrace_get_caller (btinfo, cand_r))
922 {
923 int matches;
924
925 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
926 if (best_matches < matches)
927 {
928 best_matches = matches;
929 best_l = cand_l;
930 best_r = cand_r;
931 }
932 }
933
934 /* We need at least MIN_MATCHES matches. */
935 gdb_assert (min_matches > 0);
936 if (best_matches < min_matches)
937 return 0;
938
939 DEBUG_FTRACE ("..matches: %d", best_matches);
940
941 /* We will fix up the level of BEST_R and succeeding function segments such
942 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
943
944 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
945 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
946
947 To catch this, we already fix up the level here where we can start at RHS
948 instead of at BEST_R. We will ignore the level fixup when connecting
949 BEST_L to BEST_R as they will already be on the same level. */
950 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
951
952 ftrace_connect_backtrace (btinfo, best_l, best_r);
953
954 return best_matches;
955}
956
957/* Try to bridge gaps due to overflow or decode errors by connecting the
958 function segments that are separated by the gap. */
959
960static void
961btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
962{
963 struct btrace_thread_info *btinfo = &tp->btrace;
964 std::vector<unsigned int> remaining;
965 int min_matches;
966
967 DEBUG ("bridge gaps");
968
969 /* We require a minimum amount of matches for bridging a gap. The number of
970 required matches will be lowered with each iteration.
971
972 The more matches the higher our confidence that the bridging is correct.
973 For big gaps or small traces, however, it may not be feasible to require a
974 high number of matches. */
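  /* Example (exposition only): a gap whose surrounding back traces share
     only three function segments is skipped while MIN_MATCHES is 5 or 4
     and is bridged once MIN_MATCHES drops to 3.  */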
975 for (min_matches = 5; min_matches > 0; --min_matches)
976 {
977 /* Let's try to bridge as many gaps as we can. In some cases, we need to
978 skip a gap and revisit it again after we closed later gaps. */
979 while (!gaps.empty ())
980 {
981 for (const unsigned int number : gaps)
982 {
983 struct btrace_function *gap, *lhs, *rhs;
984 int bridged;
985
986 gap = ftrace_find_call_by_number (btinfo, number);
987
988 /* We may have a sequence of gaps if we run from one error into
989 the next as we try to re-sync onto the trace stream. Ignore
990 all but the leftmost gap in such a sequence.
991
992 Also ignore gaps at the beginning of the trace. */
993 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
994 if (lhs == NULL || lhs->errcode != 0)
995 continue;
996
997 /* Skip gaps to the right. */
998 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
999 while (rhs != NULL && rhs->errcode != 0)
1000 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1001
1002 /* Ignore gaps at the end of the trace. */
1003 if (rhs == NULL)
1004 continue;
1005
1006 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1007
1008 /* Keep track of gaps we were not able to bridge and try again.
1009 If we just pushed them to the end of GAPS we would risk an
1010 infinite loop in case we simply cannot bridge a gap. */
1011 if (bridged == 0)
1012 remaining.push_back (number);
1013 }
1014
1015 /* Let's see if we made any progress. */
1016 if (remaining.size () == gaps.size ())
1017 break;
1018
1019 gaps.clear ();
1020 gaps.swap (remaining);
1021 }
1022
1023 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1024 if (gaps.empty ())
1025 break;
1026
1027 remaining.clear ();
1028 }
1029
1030 /* We may omit this in some cases. Not sure it is worth the extra
1031 complication, though. */
1032 ftrace_compute_global_level_offset (btinfo);
1033}
1034
1035/* Compute the function branch trace from BTS trace. */
1036
1037static void
1038btrace_compute_ftrace_bts (struct thread_info *tp,
1039 const struct btrace_data_bts *btrace,
1040 std::vector<unsigned int> &gaps)
1041{
1042 struct btrace_thread_info *btinfo;
1043 struct gdbarch *gdbarch;
1044 unsigned int blk;
1045 int level;
1046
1047 gdbarch = target_gdbarch ();
1048 btinfo = &tp->btrace;
1049 blk = VEC_length (btrace_block_s, btrace->blocks);
1050
1051 if (btinfo->functions.empty ())
1052 level = INT_MAX;
1053 else
1054 level = -btinfo->level;
1055
1056 while (blk != 0)
1057 {
1058 btrace_block_s *block;
1059 CORE_ADDR pc;
1060
1061 blk -= 1;
1062
1063 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1064 pc = block->begin;
1065
1066 for (;;)
1067 {
1068 struct btrace_function *bfun;
1069 struct btrace_insn insn;
1070 int size;
1071
1072 /* We should hit the end of the block. Warn if we went too far. */
1073 if (block->end < pc)
1074 {
1075 /* Indicate the gap in the trace. */
1076 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1077
1078 warning (_("Recorded trace may be corrupted at instruction "
1079 "%u (pc = %s)."), bfun->insn_offset - 1,
1080 core_addr_to_string_nz (pc));
1081
1082 break;
1083 }
1084
1085 bfun = ftrace_update_function (btinfo, pc);
1086
1087 /* Maintain the function level offset.
1088 For all but the last block, we do it here. */
1089 if (blk != 0)
1090 level = std::min (level, bfun->level);
1091
1092 size = 0;
1093 TRY
1094 {
1095 size = gdb_insn_length (gdbarch, pc);
1096 }
1097 CATCH (error, RETURN_MASK_ERROR)
1098 {
1099 }
1100 END_CATCH
1101
1102 insn.pc = pc;
1103 insn.size = size;
1104 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1105 insn.flags = 0;
1106
1107 ftrace_update_insns (bfun, &insn);
1108
1109 /* We're done once we pushed the instruction at the end. */
1110 if (block->end == pc)
1111 break;
1112
1113 /* We can't continue if we fail to compute the size. */
1114 if (size <= 0)
1115 {
1116 /* Indicate the gap in the trace. We just added INSN so we're
1117 not at the beginning. */
1118 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1119
1120 warning (_("Recorded trace may be incomplete at instruction %u "
1121 "(pc = %s)."), bfun->insn_offset - 1,
1122 core_addr_to_string_nz (pc));
1123
1124 break;
1125 }
1126
1127 pc += size;
1128
1129 /* Maintain the function level offset.
1130 For the last block, we do it here to not consider the last
1131 instruction.
1132 Since the last instruction corresponds to the current instruction
1133 and is not really part of the execution history, it shouldn't
1134 affect the level. */
1135 if (blk == 0)
1136 level = std::min (level, bfun->level);
1137 }
1138 }
1139
1140 /* LEVEL is the minimal function level of all btrace function segments.
1141 Define the global level offset to -LEVEL so all function levels are
1142 normalized to start at zero. */
1143 btinfo->level = -level;
1144}
1145
1146#if defined (HAVE_LIBIPT)
1147
1148static enum btrace_insn_class
1149pt_reclassify_insn (enum pt_insn_class iclass)
1150{
1151 switch (iclass)
1152 {
1153 case ptic_call:
1154 return BTRACE_INSN_CALL;
1155
1156 case ptic_return:
1157 return BTRACE_INSN_RETURN;
1158
1159 case ptic_jump:
1160 return BTRACE_INSN_JUMP;
1161
1162 default:
1163 return BTRACE_INSN_OTHER;
1164 }
1165}
1166
1167/* Return the btrace instruction flags for INSN. */
1168
1169static btrace_insn_flags
1170pt_btrace_insn_flags (const struct pt_insn &insn)
1171{
1172 btrace_insn_flags flags = 0;
1173
1174 if (insn.speculative)
1175 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1176
1177 return flags;
1178}
1179
1180/* Return the btrace instruction for INSN. */
1181
1182static btrace_insn
1183pt_btrace_insn (const struct pt_insn &insn)
1184{
1185 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1186 pt_reclassify_insn (insn.iclass),
1187 pt_btrace_insn_flags (insn)};
1188}
1189
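/* Illustrative sketch (not part of GDB): converting one decoded pt_insn
   into GDB's representation, as ftrace_add_pt does below.  */
#if 0
static void
example_convert (const struct pt_insn &insn)
{
  btrace_insn binsn = pt_btrace_insn (insn);

  /* BINSN.pc is INSN.ip, BINSN.iclass comes from pt_reclassify_insn, and
     BINSN.flags marks speculatively executed instructions.  */
}
#endif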
1190
1191/* Add function branch trace to BTINFO using DECODER. */
1192
1193static void
1194ftrace_add_pt (struct btrace_thread_info *btinfo,
1195 struct pt_insn_decoder *decoder,
1196 int *plevel,
1197 std::vector<unsigned int> &gaps)
1198{
1199 struct btrace_function *bfun;
1200 uint64_t offset;
1201 int errcode;
1202
1203 for (;;)
1204 {
1205 struct pt_insn insn;
1206
1207 errcode = pt_insn_sync_forward (decoder);
1208 if (errcode < 0)
1209 {
1210 if (errcode != -pte_eos)
1211 warning (_("Failed to synchronize onto the Intel Processor "
1212 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1213 break;
1214 }
1215
1216 for (;;)
1217 {
1218 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1219 if (errcode < 0)
1220 break;
1221
1222 /* Look for gaps in the trace - unless we're at the beginning. */
1223 if (!btinfo->functions.empty ())
1224 {
1225 /* Tracing is disabled and re-enabled each time we enter the
1226 kernel. Most times, we continue from the same instruction we
1227 stopped before. This is indicated via the RESUMED instruction
1228 flag. The ENABLED instruction flag means that we continued
1229 from some other instruction. Indicate this as a trace gap. */
1230 if (insn.enabled)
1231 {
1232 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1233
1234 pt_insn_get_offset (decoder, &offset);
1235
1236 warning (_("Non-contiguous trace at instruction %u (offset "
1237 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1238 bfun->insn_offset - 1, offset, insn.ip);
1239 }
1240 }
1241
1242 /* Indicate trace overflows. */
1243 if (insn.resynced)
1244 {
1245 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1246
1247 pt_insn_get_offset (decoder, &offset);
1248
1249 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1250 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
1251 offset, insn.ip);
1252 }
1253
1254 bfun = ftrace_update_function (btinfo, insn.ip);
1255
1256 /* Maintain the function level offset. */
1257 *plevel = std::min (*plevel, bfun->level);
1258
1259 btrace_insn btinsn = pt_btrace_insn (insn);
1260 ftrace_update_insns (bfun, &btinsn);
1261 }
1262
1263 if (errcode == -pte_eos)
1264 break;
1265
1266 /* Indicate the gap in the trace. */
1267 bfun = ftrace_new_gap (btinfo, errcode, gaps);
1268
1269 pt_insn_get_offset (decoder, &offset);
1270
1271 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1272 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
1273 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1274 }
1275}
1276
1277/* A callback function to allow the trace decoder to read the inferior's
1278 memory. */
1279
1280static int
1281btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1282 const struct pt_asid *asid, uint64_t pc,
1283 void *context)
1284{
1285 int result, errcode;
1286
1287 result = (int) size;
1288 TRY
1289 {
1290 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1291 if (errcode != 0)
1292 result = -pte_nomap;
1293 }
1294 CATCH (error, RETURN_MASK_ERROR)
1295 {
1296 result = -pte_nomap;
1297 }
1298 END_CATCH
1299
1300 return result;
1301}
1302
1303/* Translate the vendor from one enum to another. */
1304
1305static enum pt_cpu_vendor
1306pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1307{
1308 switch (vendor)
1309 {
1310 default:
1311 return pcv_unknown;
1312
1313 case CV_INTEL:
1314 return pcv_intel;
1315 }
1316}
1317
1318/* Finalize the function branch trace after decode. */
1319
1320static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1321 struct thread_info *tp, int level)
1322{
1323 pt_insn_free_decoder (decoder);
1324
1325 /* LEVEL is the minimal function level of all btrace function segments.
1326 Define the global level offset to -LEVEL so all function levels are
1327 normalized to start at zero. */
1328 tp->btrace.level = -level;
1329
1330 /* Add a single last instruction entry for the current PC.
1331 This allows us to compute the backtrace at the current PC using both
1332 standard unwind and btrace unwind.
1333 This extra entry is ignored by all record commands. */
1334 btrace_add_pc (tp);
1335}
1336
1337/* Compute the function branch trace from Intel Processor Trace
1338 format. */
1339
1340static void
1341btrace_compute_ftrace_pt (struct thread_info *tp,
1342 const struct btrace_data_pt *btrace,
1343 std::vector<unsigned int> &gaps)
1344{
1345 struct btrace_thread_info *btinfo;
1346 struct pt_insn_decoder *decoder;
1347 struct pt_config config;
1348 int level, errcode;
1349
1350 if (btrace->size == 0)
1351 return;
1352
1353 btinfo = &tp->btrace;
1354 if (btinfo->functions.empty ())
1355 level = INT_MAX;
1356 else
1357 level = -btinfo->level;
1358
1359 pt_config_init (&config);
1360 config.begin = btrace->data;
1361 config.end = btrace->data + btrace->size;
1362
1363 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1364 config.cpu.family = btrace->config.cpu.family;
1365 config.cpu.model = btrace->config.cpu.model;
1366 config.cpu.stepping = btrace->config.cpu.stepping;
1367
1368 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1369 if (errcode < 0)
1370 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1371 pt_errstr (pt_errcode (errcode)));
1372
1373 decoder = pt_insn_alloc_decoder (&config);
1374 if (decoder == NULL)
1375 error (_("Failed to allocate the Intel Processor Trace decoder."));
1376
1377 TRY
1378 {
1379 struct pt_image *image;
1380
1381 image = pt_insn_get_image (decoder);
1382 if (image == NULL)
1383 error (_("Failed to configure the Intel Processor Trace decoder."));
1384
1385 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1386 if (errcode < 0)
1387 error (_("Failed to configure the Intel Processor Trace decoder: "
1388 "%s."), pt_errstr (pt_errcode (errcode)));
1389
1390 ftrace_add_pt (btinfo, decoder, &level, gaps);
1391 }
1392 CATCH (error, RETURN_MASK_ALL)
1393 {
1394 /* Indicate a gap in the trace if we quit trace processing. */
1395 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1396 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1397
1398 btrace_finalize_ftrace_pt (decoder, tp, level);
1399
1400 throw_exception (error);
1401 }
1402 END_CATCH
1403
1404 btrace_finalize_ftrace_pt (decoder, tp, level);
1405}
1406
1407#else /* defined (HAVE_LIBIPT) */
1408
1409static void
1410btrace_compute_ftrace_pt (struct thread_info *tp,
1411 const struct btrace_data_pt *btrace,
1412 std::vector<unsigned int> &gaps)
1413{
1414 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1415}
1416
1417#endif /* defined (HAVE_LIBIPT) */
1418
1419/* Compute the function branch trace from a block branch trace BTRACE for
1420 a thread given by BTINFO. */
1421
1422static void
1423btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1424 std::vector<unsigned int> &gaps)
1425{
1426 DEBUG ("compute ftrace");
1427
1428 switch (btrace->format)
1429 {
1430 case BTRACE_FORMAT_NONE:
1431 return;
1432
1433 case BTRACE_FORMAT_BTS:
1434 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1435 return;
1436
1437 case BTRACE_FORMAT_PT:
1438 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1439 return;
1440 }
1441
1442 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1443}
1444
1445static void
1446btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1447{
1448 if (!gaps.empty ())
1449 {
1450 tp->btrace.ngaps += gaps.size ();
1451 btrace_bridge_gaps (tp, gaps);
1452 }
1453}
1454
1455static void
1456btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1457{
1458 std::vector<unsigned int> gaps;
1459
1460 TRY
1461 {
1462 btrace_compute_ftrace_1 (tp, btrace, gaps);
1463 }
1464 CATCH (error, RETURN_MASK_ALL)
1465 {
1466 btrace_finalize_ftrace (tp, gaps);
1467
1468 throw_exception (error);
1469 }
1470 END_CATCH
1471
1472 btrace_finalize_ftrace (tp, gaps);
1473}
1474
1475/* Add an entry for the current PC. */
1476
1477static void
1478btrace_add_pc (struct thread_info *tp)
1479{
1480 struct btrace_data btrace;
1481 struct btrace_block *block;
1482 struct regcache *regcache;
1483 struct cleanup *cleanup;
1484 CORE_ADDR pc;
1485
1486 regcache = get_thread_regcache (tp->ptid);
1487 pc = regcache_read_pc (regcache);
1488
1489 btrace_data_init (&btrace);
1490 btrace.format = BTRACE_FORMAT_BTS;
1491 btrace.variant.bts.blocks = NULL;
1492
1493 cleanup = make_cleanup_btrace_data (&btrace);
1494
1495 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1496 block->begin = pc;
1497 block->end = pc;
1498
1499 btrace_compute_ftrace (tp, &btrace);
1500
1501 do_cleanups (cleanup);
1502}
1503
1504/* See btrace.h. */
1505
1506void
1507btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1508{
1509 if (tp->btrace.target != NULL)
1510 return;
1511
1512#if !defined (HAVE_LIBIPT)
1513 if (conf->format == BTRACE_FORMAT_PT)
1514 error (_("GDB does not support Intel Processor Trace."));
1515#endif /* !defined (HAVE_LIBIPT) */
1516
1517 if (!target_supports_btrace (conf->format))
1518 error (_("Target does not support branch tracing."));
1519
1520 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1521 target_pid_to_str (tp->ptid));
1522
1523 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1524
1525 /* We're done if we failed to enable tracing. */
1526 if (tp->btrace.target == NULL)
1527 return;
1528
1529 /* We need to undo the enable in case of errors. */
1530 TRY
1531 {
1532 /* Add an entry for the current PC so we start tracing from where we
1533 enabled it.
1534
1535 If we can't access TP's registers, TP is most likely running. In this
1536 case, we can't really say where tracing was enabled so it should be
1537 safe to simply skip this step.
1538
1539 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1540 start at the PC at which tracing was enabled. */
1541 if (conf->format != BTRACE_FORMAT_PT
1542 && can_access_registers_ptid (tp->ptid))
1543 btrace_add_pc (tp);
1544 }
1545 CATCH (exception, RETURN_MASK_ALL)
1546 {
1547 btrace_disable (tp);
1548
1549 throw_exception (exception);
1550 }
1551 END_CATCH
1552}
1553
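/* Illustrative sketch (not part of GDB): enabling branch tracing in BTS
   format for a thread.  Leaving the remaining btrace_config fields
   zero-initialized is an assumption made for illustration, which
   presumably leaves any buffer sizing to the target.  */
#if 0
static void
example_enable_bts (struct thread_info *tp)
{
  struct btrace_config conf {};

  conf.format = BTRACE_FORMAT_BTS;
  btrace_enable (tp, &conf);
}
#endif
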
1554/* See btrace.h. */
1555
1556const struct btrace_config *
1557btrace_conf (const struct btrace_thread_info *btinfo)
1558{
1559 if (btinfo->target == NULL)
1560 return NULL;
1561
1562 return target_btrace_conf (btinfo->target);
1563}
1564
1565/* See btrace.h. */
1566
1567void
1568btrace_disable (struct thread_info *tp)
1569{
1570 struct btrace_thread_info *btp = &tp->btrace;
1571 int errcode = 0;
1572
1573 if (btp->target == NULL)
1574 return;
1575
1576 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1577 target_pid_to_str (tp->ptid));
1578
1579 target_disable_btrace (btp->target);
1580 btp->target = NULL;
1581
1582 btrace_clear (tp);
1583}
1584
1585/* See btrace.h. */
1586
1587void
1588btrace_teardown (struct thread_info *tp)
1589{
1590 struct btrace_thread_info *btp = &tp->btrace;
1591 int errcode = 0;
1592
1593 if (btp->target == NULL)
1594 return;
1595
1596 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1597 target_pid_to_str (tp->ptid));
1598
1599 target_teardown_btrace (btp->target);
1600 btp->target = NULL;
1601
1602 btrace_clear (tp);
1603}
1604
1605/* Stitch branch trace in BTS format. */
1606
1607static int
1608btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1609{
1610 struct btrace_thread_info *btinfo;
1611 struct btrace_function *last_bfun;
1612 struct btrace_insn *last_insn;
1613 btrace_block_s *first_new_block;
1614
1615 btinfo = &tp->btrace;
1616 gdb_assert (!btinfo->functions.empty ());
1617 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1618
1619 last_bfun = &btinfo->functions.back ();
1620
1621 /* If the existing trace ends with a gap, we just glue the traces
1622 together. We need to drop the last (i.e. chronologically first) block
1623 of the new trace, though, since we can't fill in the start address. */
1624 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1625 {
1626 VEC_pop (btrace_block_s, btrace->blocks);
1627 return 0;
1628 }
1629
1630 /* Beware that block trace starts with the most recent block, so the
1631 chronologically first block in the new trace is the last block in
1632 the new trace's block vector. */
1633 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1634 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1635
1636 /* If the current PC at the end of the block is the same as in our current
1637 trace, there are two explanations:
1638 1. we executed the instruction and some branch brought us back.
1639 2. we have not made any progress.
1640 In the first case, the delta trace vector should contain at least two
1641 entries.
1642 In the second case, the delta trace vector should contain exactly one
1643 entry for the partial block containing the current PC. Remove it. */
1644 if (first_new_block->end == last_insn->pc
1645 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1646 {
1647 VEC_pop (btrace_block_s, btrace->blocks);
1648 return 0;
1649 }
1650
1651 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1652 core_addr_to_string_nz (first_new_block->end));
1653
1654 /* Do a simple sanity check to make sure we don't accidentally end up
1655 with a bad block. This should not occur in practice. */
1656 if (first_new_block->end < last_insn->pc)
1657 {
1658 warning (_("Error while trying to read delta trace. Falling back to "
1659 "a full read."));
1660 return -1;
1661 }
1662
1663 /* We adjust the last block to start at the end of our current trace. */
1664 gdb_assert (first_new_block->begin == 0);
1665 first_new_block->begin = last_insn->pc;
1666
1667 /* We simply pop the last insn so we can insert it again as part of
1668 the normal branch trace computation.
1669 Since instruction iterators are based on indices in the instructions
1670 vector, we don't leave any pointers dangling. */
1671 DEBUG ("pruning insn at %s for stitching",
1672 ftrace_print_insn_addr (last_insn));
1673
1674 VEC_pop (btrace_insn_s, last_bfun->insn);
1675
1676 /* The instructions vector may become empty temporarily if this has
1677 been the only instruction in this function segment.
1678 This violates the invariant but will be remedied shortly by
1679 btrace_compute_ftrace when we add the new trace. */
1680
1681 /* The only case where this would hurt is if the entire trace consisted
1682 of just that one instruction. If we remove it, we might turn the now
1683 empty btrace function segment into a gap. But we don't want gaps at
1684 the beginning. To avoid this, we remove the entire old trace. */
1685 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
1686 btrace_clear (tp);
1687
1688 return 0;
1689}
1690
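/* Worked example (exposition only): suppose the old trace ends with an
   instruction at PC 0x1000 and a delta read returns a single block
   [begin = 0, end = 0x1000].  That block merely repeats the current PC,
   so btrace_stitch_bts drops it and reports success without adding
   anything.  If the delta contains more than one block, the
   chronologically first block instead gets its BEGIN patched to 0x1000,
   and the instruction at 0x1000 is popped from the old trace so it is
   re-decoded as part of the new one.  */
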
1691/* Adjust the block trace in order to stitch old and new trace together.
1692 BTRACE is the new delta trace between the last and the current stop.
1693 TP is the traced thread.
1694 May modify BTRACE as well as the existing trace in TP.
1695 Return 0 on success, -1 otherwise. */
1696
1697static int
1698btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1699{
1700 /* If we don't have trace, there's nothing to do. */
1701 if (btrace_data_empty (btrace))
1702 return 0;
1703
1704 switch (btrace->format)
1705 {
1706 case BTRACE_FORMAT_NONE:
1707 return 0;
1708
1709 case BTRACE_FORMAT_BTS:
1710 return btrace_stitch_bts (&btrace->variant.bts, tp);
1711
1712 case BTRACE_FORMAT_PT:
1713 /* Delta reads are not supported. */
1714 return -1;
1715 }
1716
1717 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1718}
1719
1720/* Clear the branch trace histories in BTINFO. */
1721
1722static void
1723btrace_clear_history (struct btrace_thread_info *btinfo)
1724{
1725 xfree (btinfo->insn_history);
1726 xfree (btinfo->call_history);
1727 xfree (btinfo->replay);
1728
1729 btinfo->insn_history = NULL;
1730 btinfo->call_history = NULL;
1731 btinfo->replay = NULL;
1732}
1733
1734/* Clear the branch trace maintenance histories in BTINFO. */
1735
1736static void
1737btrace_maint_clear (struct btrace_thread_info *btinfo)
1738{
1739 switch (btinfo->data.format)
1740 {
1741 default:
1742 break;
1743
1744 case BTRACE_FORMAT_BTS:
1745 btinfo->maint.variant.bts.packet_history.begin = 0;
1746 btinfo->maint.variant.bts.packet_history.end = 0;
1747 break;
1748
1749#if defined (HAVE_LIBIPT)
1750 case BTRACE_FORMAT_PT:
1751 xfree (btinfo->maint.variant.pt.packets);
1752
1753 btinfo->maint.variant.pt.packets = NULL;
1754 btinfo->maint.variant.pt.packet_history.begin = 0;
1755 btinfo->maint.variant.pt.packet_history.end = 0;
1756 break;
1757#endif /* defined (HAVE_LIBIPT) */
1758 }
1759}
1760
1761/* See btrace.h. */
1762
1763const char *
1764btrace_decode_error (enum btrace_format format, int errcode)
1765{
1766 switch (format)
1767 {
1768 case BTRACE_FORMAT_BTS:
1769 switch (errcode)
1770 {
1771 case BDE_BTS_OVERFLOW:
1772 return _("instruction overflow");
1773
1774 case BDE_BTS_INSN_SIZE:
1775 return _("unknown instruction");
1776
1777 default:
1778 break;
1779 }
1780 break;
1781
1782#if defined (HAVE_LIBIPT)
1783 case BTRACE_FORMAT_PT:
1784 switch (errcode)
1785 {
1786 case BDE_PT_USER_QUIT:
1787 return _("trace decode cancelled");
1788
1789 case BDE_PT_DISABLED:
1790 return _("disabled");
1791
1792 case BDE_PT_OVERFLOW:
1793 return _("overflow");
1794
1795 default:
1796 if (errcode < 0)
1797 return pt_errstr (pt_errcode (errcode));
1798 break;
1799 }
1800 break;
1801#endif /* defined (HAVE_LIBIPT) */
1802
1803 default:
1804 break;
1805 }
1806
1807 return _("unknown");
1808}
1809
1810/* See btrace.h. */
1811
1812void
1813btrace_fetch (struct thread_info *tp)
1814{
1815 struct btrace_thread_info *btinfo;
1816 struct btrace_target_info *tinfo;
1817 struct btrace_data btrace;
1818 struct cleanup *cleanup;
1819 int errcode;
1820
1821 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1822 target_pid_to_str (tp->ptid));
1823
1824 btinfo = &tp->btrace;
1825 tinfo = btinfo->target;
1826 if (tinfo == NULL)
1827 return;
1828
1829 /* There's no way we could get new trace while replaying.
1830 On the other hand, delta trace would return a partial record with the
1831 current PC, which is the replay PC, not the last PC, as expected. */
1832 if (btinfo->replay != NULL)
1833 return;
1834
1835 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1836 can store a gdb.Record object in Python referring to a different thread
1837 than the current one, temporarily set INFERIOR_PTID. */
1838 cleanup = save_inferior_ptid ();
1839 inferior_ptid = tp->ptid;
1840
1841 /* We should not be called on running or exited threads. */
1842 gdb_assert (can_access_registers_ptid (tp->ptid));
1843
1844 btrace_data_init (&btrace);
1845 make_cleanup_btrace_data (&btrace);
1846
1847 /* Let's first try to extend the trace we already have. */
1848 if (!btinfo->functions.empty ())
1849 {
1850 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1851 if (errcode == 0)
1852 {
1853 /* Success. Let's try to stitch the traces together. */
1854 errcode = btrace_stitch_trace (&btrace, tp);
1855 }
1856 else
1857 {
1858 /* We failed to read delta trace. Let's try to read new trace. */
1859 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1860
1861 /* If we got any new trace, discard what we have. */
1862 if (errcode == 0 && !btrace_data_empty (&btrace))
1863 btrace_clear (tp);
1864 }
1865
1866 /* If we were not able to read the trace, we start over. */
1867 if (errcode != 0)
1868 {
1869 btrace_clear (tp);
1870 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1871 }
1872 }
1873 else
1874 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1875
1876 /* If we were not able to read the branch trace, signal an error. */
1877 if (errcode != 0)
1878 error (_("Failed to read branch trace."));
1879
1880 /* Compute the trace, provided we have any. */
1881 if (!btrace_data_empty (&btrace))
1882 {
1883 /* Store the raw trace data. The stored data will be cleared in
1884 btrace_clear, so we always append the new trace. */
1885 btrace_data_append (&btinfo->data, &btrace);
1886 btrace_maint_clear (btinfo);
1887
1888 btrace_clear_history (btinfo);
1889 btrace_compute_ftrace (tp, &btrace);
1890 }
1891
1892 do_cleanups (cleanup);
1893}
1894
1895/* See btrace.h. */
1896
1897void
1898btrace_clear (struct thread_info *tp)
1899{
1900 struct btrace_thread_info *btinfo;
1901
1902 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1903 target_pid_to_str (tp->ptid));
1904
1905 /* Make sure btrace frames that may hold a pointer into the branch
1906 trace data are destroyed. */
1907 reinit_frame_cache ();
1908
1909 btinfo = &tp->btrace;
1910 for (auto &bfun : btinfo->functions)
1911 VEC_free (btrace_insn_s, bfun.insn);
1912
1913 btinfo->functions.clear ();
1914 btinfo->ngaps = 0;
1915
1916 /* Clear the maint data first - it depends on BTINFO->DATA, cleared below. */
1917 btrace_maint_clear (btinfo);
1918 btrace_data_clear (&btinfo->data);
1919 btrace_clear_history (btinfo);
1920}
1921
1922/* See btrace.h. */
1923
1924void
1925btrace_free_objfile (struct objfile *objfile)
1926{
1927 struct thread_info *tp;
1928
1929 DEBUG ("free objfile");
1930
1931 ALL_NON_EXITED_THREADS (tp)
1932 btrace_clear (tp);
1933}
1934
1935#if defined (HAVE_LIBEXPAT)
1936
1937/* Check the btrace document version. */
1938
1939static void
1940check_xml_btrace_version (struct gdb_xml_parser *parser,
1941 const struct gdb_xml_element *element,
1942 void *user_data, VEC (gdb_xml_value_s) *attributes)
1943{
1944 const char *version
1945 = (const char *) xml_find_attribute (attributes, "version")->value;
1946
1947 if (strcmp (version, "1.0") != 0)
1948 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1949}
1950
1951/* Parse a btrace "block" xml record. */
1952
1953static void
1954parse_xml_btrace_block (struct gdb_xml_parser *parser,
1955 const struct gdb_xml_element *element,
1956 void *user_data, VEC (gdb_xml_value_s) *attributes)
1957{
1958 struct btrace_data *btrace;
1959 struct btrace_block *block;
1960 ULONGEST *begin, *end;
1961
1962 btrace = (struct btrace_data *) user_data;
1963
1964 switch (btrace->format)
1965 {
1966 case BTRACE_FORMAT_BTS:
1967 break;
1968
1969 case BTRACE_FORMAT_NONE:
1970 btrace->format = BTRACE_FORMAT_BTS;
1971 btrace->variant.bts.blocks = NULL;
1972 break;
1973
1974 default:
1975 gdb_xml_error (parser, _("Btrace format error."));
1976 }
1977
1978 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1979 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1980
1981 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1982 block->begin = *begin;
1983 block->end = *end;
1984}
1985
1986/* Parse a "raw" xml record. */
1987
1988static void
1989parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1990 gdb_byte **pdata, size_t *psize)
1991{
1992 struct cleanup *cleanup;
1993 gdb_byte *data, *bin;
1994 size_t len, size;
1995
1996 len = strlen (body_text);
1997 if (len % 2 != 0)
1998 gdb_xml_error (parser, _("Bad raw data size."));
1999
2000 size = len / 2;
2001
2002 bin = data = (gdb_byte *) xmalloc (size);
2003 cleanup = make_cleanup (xfree, data);
2004
2005 /* We use hex encoding - see common/rsp-low.h. */
2006 while (len > 0)
2007 {
2008 char hi, lo;
2009
2010 hi = *body_text++;
2011 lo = *body_text++;
2012
2013 if (hi == 0 || lo == 0)
2014 gdb_xml_error (parser, _("Bad hex encoding."));
2015
2016 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2017 len -= 2;
2018 }
2019
2020 discard_cleanups (cleanup);
2021
2022 *pdata = data;
2023 *psize = size;
2024}
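
/* Example: the body text "0a1bff" decodes into the three bytes
   { 0x0a, 0x1b, 0xff } with *PSIZE == 3; an odd-length body such as
   "0a1" is rejected with "Bad raw data size.". */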
2025
2026/* Parse a btrace pt-config "cpu" xml record. */
2027
2028static void
2029parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2030 const struct gdb_xml_element *element,
2031 void *user_data,
2032 VEC (gdb_xml_value_s) *attributes)
2033{
2034 struct btrace_data *btrace;
2035 const char *vendor;
2036 ULONGEST *family, *model, *stepping;
2037
2038 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2039 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2040 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2041 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2042
2043 btrace = (struct btrace_data *) user_data;
2044
2045 if (strcmp (vendor, "GenuineIntel") == 0)
2046 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2047
2048 btrace->variant.pt.config.cpu.family = *family;
2049 btrace->variant.pt.config.cpu.model = *model;
2050 btrace->variant.pt.config.cpu.stepping = *stepping;
2051}
2052
2053/* Parse a btrace pt "raw" xml record. */
2054
2055static void
2056parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2057 const struct gdb_xml_element *element,
2058 void *user_data, const char *body_text)
2059{
2060 struct btrace_data *btrace;
2061
2062 btrace = (struct btrace_data *) user_data;
2063 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2064 &btrace->variant.pt.size);
2065}
2066
2067/* Parse a btrace "pt" xml record. */
2068
2069static void
2070parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2071 const struct gdb_xml_element *element,
2072 void *user_data, VEC (gdb_xml_value_s) *attributes)
2073{
2074 struct btrace_data *btrace;
2075
2076 btrace = (struct btrace_data *) user_data;
2077 btrace->format = BTRACE_FORMAT_PT;
2078 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2079 btrace->variant.pt.data = NULL;
2080 btrace->variant.pt.size = 0;
2081}
2082
2083static const struct gdb_xml_attribute block_attributes[] = {
2084 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2085 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2086 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2087};
2088
2089static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2090 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2091 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2092 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2093 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2094 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2095};
2096
2097static const struct gdb_xml_element btrace_pt_config_children[] = {
2098 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2099 parse_xml_btrace_pt_config_cpu, NULL },
2100 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2101};
2102
2103static const struct gdb_xml_element btrace_pt_children[] = {
2104 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2105 NULL },
2106 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2107 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2108};
2109
2110static const struct gdb_xml_attribute btrace_attributes[] = {
2111 { "version", GDB_XML_AF_NONE, NULL, NULL },
2112 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2113};
2114
2115static const struct gdb_xml_element btrace_children[] = {
2116 { "block", block_attributes, NULL,
2117 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2118 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2119 NULL },
2120 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2121};
2122
2123static const struct gdb_xml_element btrace_elements[] = {
2124 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2125 check_xml_btrace_version, NULL },
2126 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2127};
2128
2129#endif /* defined (HAVE_LIBEXPAT) */
2130
2131/* See btrace.h. */
2132
2133void
2134parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2135{
2136 struct cleanup *cleanup;
2137 int errcode;
2138
2139#if defined (HAVE_LIBEXPAT)
2140
2141 btrace->format = BTRACE_FORMAT_NONE;
2142
2143 cleanup = make_cleanup_btrace_data (btrace);
2144 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2145 buffer, btrace);
2146 if (errcode != 0)
2147 error (_("Error parsing branch trace."));
2148
2149 /* Keep parse results. */
2150 discard_cleanups (cleanup);
2151
2152#else /* !defined (HAVE_LIBEXPAT) */
2153
2154 error (_("Cannot process branch trace. XML parsing is not supported."));
2155
2156#endif /* !defined (HAVE_LIBEXPAT) */
2157}
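
/* Example: a minimal BTS btrace document accepted by the element
   tables above (illustrative only; the authoritative schema is
   btrace.dtd):

       <btrace version="1.0">
         <block begin="0x400500" end="0x400520"/>
         <block begin="0x400400" end="0x400410"/>
       </btrace>

   Each block describes a sequence of instructions that were executed
   sequentially, from BEGIN to END. */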
2158
2159#if defined (HAVE_LIBEXPAT)
2160
2161/* Parse a btrace-conf "bts" xml record. */
2162
2163static void
2164parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2165 const struct gdb_xml_element *element,
2166 void *user_data, VEC (gdb_xml_value_s) *attributes)
2167{
2168 struct btrace_config *conf;
2169 struct gdb_xml_value *size;
2170
2171 conf = (struct btrace_config *) user_data;
2172 conf->format = BTRACE_FORMAT_BTS;
2173 conf->bts.size = 0;
2174
2175 size = xml_find_attribute (attributes, "size");
2176 if (size != NULL)
2177 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2178}
2179
2180/* Parse a btrace-conf "pt" xml record. */
2181
2182static void
2183parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2184 const struct gdb_xml_element *element,
2185 void *user_data, VEC (gdb_xml_value_s) *attributes)
2186{
2187 struct btrace_config *conf;
2188 struct gdb_xml_value *size;
2189
2190 conf = (struct btrace_config *) user_data;
2191 conf->format = BTRACE_FORMAT_PT;
2192 conf->pt.size = 0;
2193
2194 size = xml_find_attribute (attributes, "size");
2195 if (size != NULL)
2196 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2197}
2198
2199static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2200 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2201 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2202};
2203
2204static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2205 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2206 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2207};
2208
2209static const struct gdb_xml_element btrace_conf_children[] = {
2210 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2211 parse_xml_btrace_conf_bts, NULL },
2212 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2213 parse_xml_btrace_conf_pt, NULL },
2214 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2215};
2216
2217static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2218 { "version", GDB_XML_AF_NONE, NULL, NULL },
2219 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2220};
2221
2222static const struct gdb_xml_element btrace_conf_elements[] = {
2223 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2224 GDB_XML_EF_NONE, NULL, NULL },
2225 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2226};
2227
2228#endif /* defined (HAVE_LIBEXPAT) */
2229
2230/* See btrace.h. */
2231
2232void
2233parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2234{
2235 int errcode;
2236
2237#if defined (HAVE_LIBEXPAT)
2238
2239 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2240 btrace_conf_elements, xml, conf);
2241 if (errcode != 0)
2242 error (_("Error parsing branch trace configuration."));
2243
2244#else /* !defined (HAVE_LIBEXPAT) */
2245
2246 error (_("XML parsing is not supported."));
2247
2248#endif /* !defined (HAVE_LIBEXPAT) */
2249}
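
/* Example: a minimal btrace configuration document matching the
   element tables above (illustrative only):

       <btrace-conf version="1.0">
         <bts size="65536"/>
       </btrace-conf>

   The "bts" and "pt" children as well as their "size" attributes are
   optional. */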
2250
2251/* See btrace.h. */
2252
2253const struct btrace_insn *
2254btrace_insn_get (const struct btrace_insn_iterator *it)
2255{
2256 const struct btrace_function *bfun;
2257 unsigned int index, end;
2258
2259 index = it->insn_index;
2260 bfun = &it->btinfo->functions[it->call_index];
2261
2262 /* Check if the iterator points to a gap in the trace. */
2263 if (bfun->errcode != 0)
2264 return NULL;
2265
2266 /* The index is within the bounds of this function's instruction vector. */
2267 end = VEC_length (btrace_insn_s, bfun->insn);
2268 gdb_assert (0 < end);
2269 gdb_assert (index < end);
2270
2271 return VEC_index (btrace_insn_s, bfun->insn, index);
2272}
2273
2274/* See btrace.h. */
2275
2276int
2277btrace_insn_get_error (const struct btrace_insn_iterator *it)
2278{
2279 return it->btinfo->functions[it->call_index].errcode;
2280}
2281
2282/* See btrace.h. */
2283
2284unsigned int
2285btrace_insn_number (const struct btrace_insn_iterator *it)
2286{
2287 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
2288}
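
/* Example: if the iterator's function segment starts at instruction
   number 10 (INSN_OFFSET == 10) and the iterator points at the fourth
   instruction within that segment (INSN_INDEX == 3), the overall
   instruction number is 13. */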
2289
2290/* See btrace.h. */
2291
2292void
2293btrace_insn_begin (struct btrace_insn_iterator *it,
2294 const struct btrace_thread_info *btinfo)
2295{
2296 if (btinfo->functions.empty ())
2297 error (_("No trace."));
2298
2299 it->btinfo = btinfo;
2300 it->call_index = 0;
2301 it->insn_index = 0;
2302}
2303
2304/* See btrace.h. */
2305
2306void
2307btrace_insn_end (struct btrace_insn_iterator *it,
2308 const struct btrace_thread_info *btinfo)
2309{
2310 const struct btrace_function *bfun;
2311 unsigned int length;
2312
2313 if (btinfo->functions.empty ())
2314 error (_("No trace."));
2315
2316 bfun = &btinfo->functions.back ();
2317 length = VEC_length (btrace_insn_s, bfun->insn);
2318
2319 /* The last function may either be a gap or contain the current
2320 instruction, which is one past the end of the execution trace;
2321 ignore it. */
2322 if (length > 0)
2323 length -= 1;
2324
2325 it->btinfo = btinfo;
2326 it->call_index = bfun->number - 1;
2327 it->insn_index = length;
2328}
2329
2330/* See btrace.h. */
2331
2332unsigned int
2333btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2334{
2335 const struct btrace_function *bfun;
2336 unsigned int index, steps;
2337
2338 bfun = &it->btinfo->functions[it->call_index];
2339 steps = 0;
2340 index = it->insn_index;
2341
2342 while (stride != 0)
2343 {
2344 unsigned int end, space, adv;
2345
2346 end = VEC_length (btrace_insn_s, bfun->insn);
2347
2348 /* An empty function segment represents a gap in the trace. We count
2349 it as one instruction. */
2350 if (end == 0)
2351 {
2352 const struct btrace_function *next;
2353
2354 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2355 if (next == NULL)
2356 break;
2357
2358 stride -= 1;
2359 steps += 1;
2360
2361 bfun = next;
2362 index = 0;
2363
2364 continue;
2365 }
2366
2367 gdb_assert (0 < end);
2368 gdb_assert (index < end);
2369
2370 /* Compute the number of instructions remaining in this segment. */
2371 space = end - index;
2372
2373 /* Advance the iterator as far as possible within this segment. */
2374 adv = std::min (space, stride);
2375 stride -= adv;
2376 index += adv;
2377 steps += adv;
2378
2379 /* Move to the next function if we're at the end of this one. */
2380 if (index == end)
2381 {
2382 const struct btrace_function *next;
2383
2384 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2385 if (next == NULL)
2386 {
2387 /* We stepped past the last function.
2388
2389 Let's adjust the index to point to the last instruction in
2390 the previous function. */
2391 index -= 1;
2392 steps -= 1;
2393 break;
2394 }
2395
2396 /* We now point to the first instruction in the new function. */
2397 bfun = next;
2398 index = 0;
2399 }
2400
2401 /* We did make progress. */
2402 gdb_assert (adv > 0);
2403 }
2404
2405 /* Update the iterator. */
2406 it->call_index = bfun->number - 1;
2407 it->insn_index = index;
2408
2409 return steps;
2410}
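
/* Example (sketch): advance IT by at most five instructions. A return
   value smaller than the requested stride indicates that the end of
   the trace was reached.

       unsigned int steps = btrace_insn_next (&it, 5);
       if (steps < 5)
         handle_end_of_trace ();

   where handle_end_of_trace is a hypothetical callback. */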
2411
2412/* See btrace.h. */
2413
2414unsigned int
2415btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2416{
2417 const struct btrace_function *bfun;
2418 unsigned int index, steps;
2419
2420 bfun = &it->btinfo->functions[it->call_index];
2421 steps = 0;
2422 index = it->insn_index;
2423
2424 while (stride != 0)
2425 {
2426 unsigned int adv;
2427
2428 /* Move to the previous function if we're at the start of this one. */
2429 if (index == 0)
2430 {
2431 const struct btrace_function *prev;
2432
2433 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2434 if (prev == NULL)
2435 break;
2436
2437 /* We point to one after the last instruction in the new function. */
2438 bfun = prev;
2439 index = VEC_length (btrace_insn_s, bfun->insn);
2440
2441 /* An empty function segment represents a gap in the trace. We count
2442 it as one instruction. */
2443 if (index == 0)
2444 {
2445 stride -= 1;
2446 steps += 1;
2447
2448 continue;
2449 }
2450 }
2451
2452 /* Advance the iterator as far as possible within this segment. */
2453 adv = std::min (index, stride);
2454
2455 stride -= adv;
2456 index -= adv;
2457 steps += adv;
2458
2459 /* We did make progress. */
2460 gdb_assert (adv > 0);
2461 }
2462
2463 /* Update the iterator. */
2464 it->call_index = bfun->number - 1;
2465 it->insn_index = index;
2466
2467 return steps;
2468}
2469
2470/* See btrace.h. */
2471
2472int
2473btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2474 const struct btrace_insn_iterator *rhs)
2475{
2476 gdb_assert (lhs->btinfo == rhs->btinfo);
2477
2478 if (lhs->call_index != rhs->call_index)
2479 return lhs->call_index - rhs->call_index;
2480
2481 return lhs->insn_index - rhs->insn_index;
2482}
2483
2484/* See btrace.h. */
2485
2486int
2487btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2488 const struct btrace_thread_info *btinfo,
2489 unsigned int number)
2490{
2491 const struct btrace_function *bfun;
2492 unsigned int upper, lower;
2493
2494 if (btinfo->functions.empty ())
2495 return 0;
2496
2497 lower = 0;
2498 bfun = &btinfo->functions[lower];
2499 if (number < bfun->insn_offset)
2500 return 0;
2501
2502 upper = btinfo->functions.size () - 1;
2503 bfun = &btinfo->functions[upper];
2504 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2505 return 0;
2506
2507 /* We assume that there are no holes in the numbering. */
2508 for (;;)
2509 {
2510 const unsigned int average = lower + (upper - lower) / 2;
2511
2512 bfun = &btinfo->functions[average];
2513
2514 if (number < bfun->insn_offset)
2515 {
2516 upper = average - 1;
2517 continue;
2518 }
2519
2520 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2521 {
2522 lower = average + 1;
2523 continue;
2524 }
2525
2526 break;
2527 }
2528
2529 it->btinfo = btinfo;
2530 it->call_index = bfun->number - 1;
2531 it->insn_index = number - bfun->insn_offset;
2532 return 1;
2533}
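
/* Example (sketch): position an iterator at instruction number 42.
   The lookup is a binary search over the function segments, relying
   on their contiguous instruction numbering.

       struct btrace_insn_iterator it;

       if (btrace_find_insn_by_number (&it, btinfo, 42))
         gdb_assert (btrace_insn_number (&it) == 42);
*/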
2534
2535/* Returns true if the recording ends with a function segment that
2536 contains only a single (i.e. the current) instruction. */
2537
2538static bool
2539btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2540{
2541 const btrace_function *bfun;
2542
2543 if (btinfo->functions.empty ())
2544 return false;
2545
2546 bfun = &btinfo->functions.back ();
2547 if (bfun->errcode != 0)
2548 return false;
2549
2550 return ftrace_call_num_insn (bfun) == 1;
2551}
2552
2553/* See btrace.h. */
2554
2555const struct btrace_function *
2556btrace_call_get (const struct btrace_call_iterator *it)
2557{
2558 if (it->index >= it->btinfo->functions.size ())
2559 return NULL;
2560
2561 return &it->btinfo->functions[it->index];
2562}
2563
2564/* See btrace.h. */
2565
2566unsigned int
2567btrace_call_number (const struct btrace_call_iterator *it)
2568{
2569 const unsigned int length = it->btinfo->functions.size ();
2570
2571 /* If the last function segment contains only a single instruction (i.e. the
2572 current instruction), skip it. */
2573 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2574 return length;
2575
2576 return it->index + 1;
2577}
2578
2579/* See btrace.h. */
2580
2581void
2582btrace_call_begin (struct btrace_call_iterator *it,
2583 const struct btrace_thread_info *btinfo)
2584{
2585 if (btinfo->functions.empty ())
2586 error (_("No trace."));
2587
2588 it->btinfo = btinfo;
2589 it->index = 0;
2590}
2591
2592/* See btrace.h. */
2593
2594void
2595btrace_call_end (struct btrace_call_iterator *it,
2596 const struct btrace_thread_info *btinfo)
2597{
2598 if (btinfo->functions.empty ())
2599 error (_("No trace."));
2600
2601 it->btinfo = btinfo;
2602 it->index = btinfo->functions.size ();
2603}
2604
2605/* See btrace.h. */
2606
2607unsigned int
2608btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2609{
2610 const unsigned int length = it->btinfo->functions.size ();
2611
2612 if (it->index + stride < length - 1)
2613 /* Default case: Simply advance the iterator. */
2614 it->index += stride;
2615 else if (it->index + stride == length - 1)
2616 {
2617 /* We land exactly at the last function segment. If it contains only one
2618 instruction (i.e. the current instruction) it is not actually part of
2619 the trace. */
2620 if (btrace_ends_with_single_insn (it->btinfo))
2621 it->index = length;
2622 else
2623 it->index = length - 1;
2624 }
2625 else
2626 {
2627 /* We land past the last function segment and have to adjust the stride.
2628 If the last function segment contains only one instruction (i.e. the
2629 current instruction) it is not actually part of the trace. */
2630 if (btrace_ends_with_single_insn (it->btinfo))
2631 stride = length - it->index - 1;
2632 else
2633 stride = length - it->index;
2634
2635 it->index = length;
2636 }
2637
2638 return stride;
2639}
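
/* Example (sketch): walk the call history from the beginning, one
   function segment at a time.

       struct btrace_call_iterator it;
       const struct btrace_function *bfun;

       btrace_call_begin (&it, btinfo);
       while ((bfun = btrace_call_get (&it)) != NULL)
         {
           ...
           if (btrace_call_next (&it, 1) == 0)
             break;
         }
*/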
2640
2641/* See btrace.h. */
2642
2643unsigned int
2644btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2645{
2646 const unsigned int length = it->btinfo->functions.size ();
2647 int steps = 0;
2648
2649 gdb_assert (it->index <= length);
2650
2651 if (stride == 0 || it->index == 0)
2652 return 0;
2653
2654 /* If we are at the end, the first step is a special case. If the last
2655 function segment contains only one instruction (i.e. the current
2656 instruction) it is not actually part of the trace. To be able to step
2657 over this instruction, we need at least one more function segment. */
2658 if ((it->index == length) && (length > 1))
2659 {
2660 if (btrace_ends_with_single_insn (it->btinfo))
2661 it->index = length - 2;
2662 else
2663 it->index = length - 1;
2664
2665 steps = 1;
2666 stride -= 1;
2667 }
2668
2669 stride = std::min (stride, it->index);
2670
2671 it->index -= stride;
2672 return steps + stride;
2673}
2674
2675/* See btrace.h. */
2676
2677int
2678btrace_call_cmp (const struct btrace_call_iterator *lhs,
2679 const struct btrace_call_iterator *rhs)
2680{
2681 gdb_assert (lhs->btinfo == rhs->btinfo);
2682 return (int) (lhs->index - rhs->index);
2683}
2684
2685/* See btrace.h. */
2686
2687int
2688btrace_find_call_by_number (struct btrace_call_iterator *it,
2689 const struct btrace_thread_info *btinfo,
2690 unsigned int number)
2691{
2692 const unsigned int length = btinfo->functions.size ();
2693
2694 if ((number == 0) || (number > length))
2695 return 0;
2696
2697 it->btinfo = btinfo;
2698 it->index = number - 1;
2699 return 1;
2700}
2701
2702/* See btrace.h. */
2703
2704void
2705btrace_set_insn_history (struct btrace_thread_info *btinfo,
2706 const struct btrace_insn_iterator *begin,
2707 const struct btrace_insn_iterator *end)
2708{
2709 if (btinfo->insn_history == NULL)
2710 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2711
2712 btinfo->insn_history->begin = *begin;
2713 btinfo->insn_history->end = *end;
2714}
2715
2716/* See btrace.h. */
2717
2718void
2719btrace_set_call_history (struct btrace_thread_info *btinfo,
2720 const struct btrace_call_iterator *begin,
2721 const struct btrace_call_iterator *end)
2722{
2723 gdb_assert (begin->btinfo == end->btinfo);
2724
2725 if (btinfo->call_history == NULL)
2726 btinfo->call_history = XCNEW (struct btrace_call_history);
2727
2728 btinfo->call_history->begin = *begin;
2729 btinfo->call_history->end = *end;
2730}
2731
2732/* See btrace.h. */
2733
2734int
2735btrace_is_replaying (struct thread_info *tp)
2736{
2737 return tp->btrace.replay != NULL;
2738}
2739
2740/* See btrace.h. */
2741
2742int
2743btrace_is_empty (struct thread_info *tp)
2744{
2745 struct btrace_insn_iterator begin, end;
2746 struct btrace_thread_info *btinfo;
2747
2748 btinfo = &tp->btrace;
2749
2750 if (btinfo->functions.empty ())
2751 return 1;
2752
2753 btrace_insn_begin (&begin, btinfo);
2754 btrace_insn_end (&end, btinfo);
2755
2756 return btrace_insn_cmp (&begin, &end) == 0;
2757}
2758
2759/* Forward the cleanup request. */
2760
2761static void
2762do_btrace_data_cleanup (void *arg)
2763{
2764 btrace_data_fini ((struct btrace_data *) arg);
2765}
2766
2767/* See btrace.h. */
2768
2769struct cleanup *
2770make_cleanup_btrace_data (struct btrace_data *data)
2771{
2772 return make_cleanup (do_btrace_data_cleanup, data);
2773}
2774
2775#if defined (HAVE_LIBIPT)
2776
2777/* Print a single packet. */
2778
2779static void
2780pt_print_packet (const struct pt_packet *packet)
2781{
2782 switch (packet->type)
2783 {
2784 default:
2785 printf_unfiltered (("[??: %x]"), packet->type);
2786 break;
2787
2788 case ppt_psb:
2789 printf_unfiltered (("psb"));
2790 break;
2791
2792 case ppt_psbend:
2793 printf_unfiltered (("psbend"));
2794 break;
2795
2796 case ppt_pad:
2797 printf_unfiltered (("pad"));
2798 break;
2799
2800 case ppt_tip:
2801 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2802 packet->payload.ip.ipc,
2803 packet->payload.ip.ip);
2804 break;
2805
2806 case ppt_tip_pge:
2807 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2808 packet->payload.ip.ipc,
2809 packet->payload.ip.ip);
2810 break;
2811
2812 case ppt_tip_pgd:
2813 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2814 packet->payload.ip.ipc,
2815 packet->payload.ip.ip);
2816 break;
2817
2818 case ppt_fup:
2819 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2820 packet->payload.ip.ipc,
2821 packet->payload.ip.ip);
2822 break;
2823
2824 case ppt_tnt_8:
2825 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2826 packet->payload.tnt.bit_size,
2827 packet->payload.tnt.payload);
2828 break;
2829
2830 case ppt_tnt_64:
2831 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2832 packet->payload.tnt.bit_size,
2833 packet->payload.tnt.payload);
2834 break;
2835
2836 case ppt_pip:
2837 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2838 packet->payload.pip.nr ? (" nr") : (""));
2839 break;
2840
2841 case ppt_tsc:
2842 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2843 break;
2844
2845 case ppt_cbr:
2846 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2847 break;
2848
2849 case ppt_mode:
2850 switch (packet->payload.mode.leaf)
2851 {
2852 default:
2853 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2854 break;
2855
2856 case pt_mol_exec:
2857 printf_unfiltered (("mode.exec%s%s"),
2858 packet->payload.mode.bits.exec.csl
2859 ? (" cs.l") : (""),
2860 packet->payload.mode.bits.exec.csd
2861 ? (" cs.d") : (""));
2862 break;
2863
2864 case pt_mol_tsx:
2865 printf_unfiltered (("mode.tsx%s%s"),
2866 packet->payload.mode.bits.tsx.intx
2867 ? (" intx") : (""),
2868 packet->payload.mode.bits.tsx.abrt
2869 ? (" abrt") : (""));
2870 break;
2871 }
2872 break;
2873
2874 case ppt_ovf:
2875 printf_unfiltered (("ovf"));
2876 break;
2877
2878 case ppt_stop:
2879 printf_unfiltered (("stop"));
2880 break;
2881
2882 case ppt_vmcs:
2883 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2884 break;
2885
2886 case ppt_tma:
2887 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2888 packet->payload.tma.fc);
2889 break;
2890
2891 case ppt_mtc:
2892 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2893 break;
2894
2895 case ppt_cyc:
2896 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2897 break;
2898
2899 case ppt_mnt:
2900 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2901 break;
2902 }
2903}
2904
2905/* Decode packets into MAINT using DECODER. */
2906
2907static void
2908btrace_maint_decode_pt (struct btrace_maint_info *maint,
2909 struct pt_packet_decoder *decoder)
2910{
2911 int errcode;
2912
2913 for (;;)
2914 {
2915 struct btrace_pt_packet packet;
2916
2917 errcode = pt_pkt_sync_forward (decoder);
2918 if (errcode < 0)
2919 break;
2920
2921 for (;;)
2922 {
2923 pt_pkt_get_offset (decoder, &packet.offset);
2924
2925 errcode = pt_pkt_next (decoder, &packet.packet,
2926 sizeof (packet.packet));
2927 if (errcode < 0)
2928 break;
2929
2930 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2931 {
2932 packet.errcode = pt_errcode (errcode);
2933 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2934 &packet);
2935 }
2936 }
2937
2938 if (errcode == -pte_eos)
2939 break;
2940
2941 packet.errcode = pt_errcode (errcode);
2942 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2943 &packet);
2944
2945 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2946 packet.offset, pt_errstr (packet.errcode));
2947 }
2948
2949 if (errcode != -pte_eos)
2950 warning (_("Failed to synchronize onto the Intel Processor Trace "
2951 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2952}
2953
2954/* Update the packet history in BTINFO. */
2955
2956static void
2957btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2958{
2960 struct pt_packet_decoder *decoder;
2961 struct btrace_data_pt *pt;
2962 struct pt_config config;
2963 int errcode;
2964
2965 pt = &btinfo->data.variant.pt;
2966
2967 /* Nothing to do if there is no trace. */
2968 if (pt->size == 0)
2969 return;
2970
2971 memset (&config, 0, sizeof (config));
2972
2973 config.size = sizeof (config);
2974 config.begin = pt->data;
2975 config.end = pt->data + pt->size;
2976
2977 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2978 config.cpu.family = pt->config.cpu.family;
2979 config.cpu.model = pt->config.cpu.model;
2980 config.cpu.stepping = pt->config.cpu.stepping;
2981
2982 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2983 if (errcode < 0)
2984 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2985 pt_errstr (pt_errcode (errcode)));
2986
2987 decoder = pt_pkt_alloc_decoder (&config);
2988 if (decoder == NULL)
2989 error (_("Failed to allocate the Intel Processor Trace decoder."));
2990
2991 TRY
2992 {
2993 btrace_maint_decode_pt (&btinfo->maint, decoder);
2994 }
2995 CATCH (except, RETURN_MASK_ALL)
2996 {
2997 pt_pkt_free_decoder (decoder);
2998
2999 if (except.reason < 0)
3000 throw_exception (except);
3001 }
3002 END_CATCH
3003
3004 pt_pkt_free_decoder (decoder);
3005}
3006
3007#endif /* defined (HAVE_LIBIPT) */
3008
3009/* Update the packet maintenance information for BTINFO and store the
3010 low and high bounds into BEGIN and END, respectively.
3011 Store the current iterator state into FROM and TO. */
3012
3013static void
3014btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3015 unsigned int *begin, unsigned int *end,
3016 unsigned int *from, unsigned int *to)
3017{
3018 switch (btinfo->data.format)
3019 {
3020 default:
3021 *begin = 0;
3022 *end = 0;
3023 *from = 0;
3024 *to = 0;
3025 break;
3026
3027 case BTRACE_FORMAT_BTS:
3028 /* Nothing to do - we operate directly on BTINFO->DATA. */
3029 *begin = 0;
3030 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3031 *from = btinfo->maint.variant.bts.packet_history.begin;
3032 *to = btinfo->maint.variant.bts.packet_history.end;
3033 break;
3034
3035#if defined (HAVE_LIBIPT)
3036 case BTRACE_FORMAT_PT:
3037 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3038 btrace_maint_update_pt_packets (btinfo);
3039
3040 *begin = 0;
3041 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3042 *from = btinfo->maint.variant.pt.packet_history.begin;
3043 *to = btinfo->maint.variant.pt.packet_history.end;
3044 break;
3045#endif /* defined (HAVE_LIBIPT) */
3046 }
3047}
3048
3049/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3050 update the current iterator position. */
3051
3052static void
3053btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3054 unsigned int begin, unsigned int end)
3055{
3056 switch (btinfo->data.format)
3057 {
3058 default:
3059 break;
3060
3061 case BTRACE_FORMAT_BTS:
3062 {
3063 VEC (btrace_block_s) *blocks;
3064 unsigned int blk;
3065
3066 blocks = btinfo->data.variant.bts.blocks;
3067 for (blk = begin; blk < end; ++blk)
3068 {
3069 const btrace_block_s *block;
3070
3071 block = VEC_index (btrace_block_s, blocks, blk);
3072
3073 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3074 core_addr_to_string_nz (block->begin),
3075 core_addr_to_string_nz (block->end));
3076 }
3077
3078 btinfo->maint.variant.bts.packet_history.begin = begin;
3079 btinfo->maint.variant.bts.packet_history.end = end;
3080 }
3081 break;
3082
3083#if defined (HAVE_LIBIPT)
3084 case BTRACE_FORMAT_PT:
3085 {
3086 VEC (btrace_pt_packet_s) *packets;
3087 unsigned int pkt;
3088
3089 packets = btinfo->maint.variant.pt.packets;
3090 for (pkt = begin; pkt < end; ++pkt)
3091 {
3092 const struct btrace_pt_packet *packet;
3093
3094 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3095
3096 printf_unfiltered ("%u\t", pkt);
3097 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3098
3099 if (packet->errcode == pte_ok)
3100 pt_print_packet (&packet->packet);
3101 else
3102 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3103
3104 printf_unfiltered ("\n");
3105 }
3106
3107 btinfo->maint.variant.pt.packet_history.begin = begin;
3108 btinfo->maint.variant.pt.packet_history.end = end;
3109 }
3110 break;
3111#endif /* defined (HAVE_LIBIPT) */
3112 }
3113}
3114
3115/* Read a number from an argument string. */
3116
3117static unsigned int
3118get_uint (char **arg)
3119{
3120 char *begin, *end, *pos;
3121 unsigned long number;
3122
3123 begin = *arg;
3124 pos = skip_spaces (begin);
3125
3126 if (!isdigit (*pos))
3127 error (_("Expected positive number, got: %s."), pos);
3128
3129 number = strtoul (pos, &end, 10);
3130 if (number > UINT_MAX)
3131 error (_("Number too big."));
3132
3133 *arg += (end - begin);
3134
3135 return (unsigned int) number;
3136}
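
/* Example: with *ARG pointing at "12,+5", get_uint returns 12 and
   advances *ARG to point at ",+5". */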
3137
3138/* Read a context size from an argument string. */
3139
3140static int
3141get_context_size (char **arg)
3142{
3143 char *pos;
3145
3146 pos = skip_spaces (*arg);
3147
3148 if (!isdigit (*pos))
3149 error (_("Expected positive number, got: %s."), pos);
3150
3151 return strtol (pos, arg, 10);
3152}
3153
3154/* Complain about junk at the end of an argument string. */
3155
3156static void
3157no_chunk (char *arg)
3158{
3159 if (*arg != 0)
3160 error (_("Junk after argument: %s."), arg);
3161}
3162
3163/* The "maintenance btrace packet-history" command. */
3164
3165static void
3166maint_btrace_packet_history_cmd (char *arg, int from_tty)
3167{
3168 struct btrace_thread_info *btinfo;
3169 struct thread_info *tp;
3170 unsigned int size, begin, end, from, to;
3171
3172 tp = find_thread_ptid (inferior_ptid);
3173 if (tp == NULL)
3174 error (_("No thread."));
3175
3176 size = 10;
3177 btinfo = &tp->btrace;
3178
3179 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3180 if (begin == end)
3181 {
3182 printf_unfiltered (_("No trace.\n"));
3183 return;
3184 }
3185
3186 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3187 {
3188 from = to;
3189
3190 if (end - from < size)
3191 size = end - from;
3192 to = from + size;
3193 }
3194 else if (strcmp (arg, "-") == 0)
3195 {
3196 to = from;
3197
3198 if (to - begin < size)
3199 size = to - begin;
3200 from = to - size;
3201 }
3202 else
3203 {
3204 from = get_uint (&arg);
3205 if (end <= from)
3206 error (_("'%u' is out of range."), from);
3207
3208 arg = skip_spaces (arg);
3209 if (*arg == ',')
3210 {
3211 arg = skip_spaces (++arg);
3212
3213 if (*arg == '+')
3214 {
3215 arg += 1;
3216 size = get_context_size (&arg);
3217
3218 no_chunk (arg);
3219
3220 if (end - from < size)
3221 size = end - from;
3222 to = from + size;
3223 }
3224 else if (*arg == '-')
3225 {
3226 arg += 1;
3227 size = get_context_size (&arg);
3228
3229 no_chunk (arg);
3230
3231 /* Include the packet given as first argument. */
3232 from += 1;
3233 to = from;
3234
3235 if (to - begin < size)
3236 size = to - begin;
3237 from = to - size;
3238 }
3239 else
3240 {
3241 to = get_uint (&arg);
3242
3243 /* Include the packet at the second argument and silently
3244 truncate the range. */
3245 if (to < end)
3246 to += 1;
3247 else
3248 to = end;
3249
3250 no_chunk (arg);
3251 }
3252 }
3253 else
3254 {
3255 no_chunk (arg);
3256
3257 if (end - from < size)
3258 size = end - from;
3259 to = from + size;
3260 }
3261
3262 dont_repeat ();
3263 }
3264
3265 btrace_maint_print_packets (btinfo, from, to);
3266}
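
/* Example invocations (illustrative):

       (gdb) maint btrace packet-history          <- next ten packets
       (gdb) maint btrace packet-history -        <- previous ten packets
       (gdb) maint btrace packet-history 10       <- packets 10..19
       (gdb) maint btrace packet-history 10,20    <- packets 10..20
       (gdb) maint btrace packet-history 10,+5    <- packets 10..14

   Out-of-range requests are silently truncated to the available
   packets. */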
3267
3268/* The "maintenance btrace clear-packet-history" command. */
3269
3270static void
3271maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3272{
3273 struct btrace_thread_info *btinfo;
3274 struct thread_info *tp;
3275
3276 if (args != NULL && *args != 0)
3277 error (_("Invalid argument."));
3278
3279 tp = find_thread_ptid (inferior_ptid);
3280 if (tp == NULL)
3281 error (_("No thread."));
3282
3283 btinfo = &tp->btrace;
3284
3285 /* Clear the maint data first - it depends on BTINFO->DATA, cleared below. */
3286 btrace_maint_clear (btinfo);
3287 btrace_data_clear (&btinfo->data);
3288}
3289
3290/* The "maintenance btrace clear" command. */
3291
3292static void
3293maint_btrace_clear_cmd (char *args, int from_tty)
3294{
3295 struct btrace_thread_info *btinfo;
3296 struct thread_info *tp;
3297
3298 if (args != NULL && *args != 0)
3299 error (_("Invalid argument."));
3300
3301 tp = find_thread_ptid (inferior_ptid);
3302 if (tp == NULL)
3303 error (_("No thread."));
3304
3305 btrace_clear (tp);
3306}
3307
3308/* The "maintenance btrace" command. */
3309
3310static void
3311maint_btrace_cmd (char *args, int from_tty)
3312{
3313 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3314 gdb_stdout);
3315}
3316
3317/* The "maintenance set btrace" command. */
3318
3319static void
3320maint_btrace_set_cmd (char *args, int from_tty)
3321{
3322 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3323 gdb_stdout);
3324}
3325
3326/* The "maintenance show btrace" command. */
3327
3328static void
3329maint_btrace_show_cmd (char *args, int from_tty)
3330{
3331 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3332 all_commands, gdb_stdout);
3333}
3334
3335/* The "maintenance set btrace pt" command. */
3336
3337static void
3338maint_btrace_pt_set_cmd (char *args, int from_tty)
3339{
3340 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3341 all_commands, gdb_stdout);
3342}
3343
3344/* The "maintenance show btrace pt" command. */
3345
3346static void
3347maint_btrace_pt_show_cmd (char *args, int from_tty)
3348{
3349 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3350 all_commands, gdb_stdout);
3351}
3352
3353/* The "maintenance info btrace" command. */
3354
3355static void
3356maint_info_btrace_cmd (char *args, int from_tty)
3357{
3358 struct btrace_thread_info *btinfo;
3359 struct thread_info *tp;
3360 const struct btrace_config *conf;
3361
3362 if (args != NULL && *args != 0)
3363 error (_("Invalid argument."));
3364
3365 tp = find_thread_ptid (inferior_ptid);
3366 if (tp == NULL)
3367 error (_("No thread."));
3368
3369 btinfo = &tp->btrace;
3370
3371 conf = btrace_conf (btinfo);
3372 if (conf == NULL)
3373 error (_("No btrace configuration."));
3374
3375 printf_unfiltered (_("Format: %s.\n"),
3376 btrace_format_string (conf->format));
3377
3378 switch (conf->format)
3379 {
3380 default:
3381 break;
3382
3383 case BTRACE_FORMAT_BTS:
3384 printf_unfiltered (_("Number of packets: %u.\n"),
3385 VEC_length (btrace_block_s,
3386 btinfo->data.variant.bts.blocks));
3387 break;
3388
3389#if defined (HAVE_LIBIPT)
3390 case BTRACE_FORMAT_PT:
3391 {
3392 struct pt_version version;
3393
3394 version = pt_library_version ();
3395 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3396 version.minor, version.build,
3397 version.ext != NULL ? version.ext : "");
3398
3399 btrace_maint_update_pt_packets (btinfo);
3400 printf_unfiltered (_("Number of packets: %u.\n"),
3401 VEC_length (btrace_pt_packet_s,
3402 btinfo->maint.variant.pt.packets));
3403 }
3404 break;
3405#endif /* defined (HAVE_LIBIPT) */
3406 }
3407}
3408
3409/* The "maint show btrace pt skip-pad" show value function. */
3410
3411static void
3412show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3413 struct cmd_list_element *c,
3414 const char *value)
3415{
3416 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3417}
3418
3419
3420/* Initialize btrace maintenance commands. */
3421
3422void _initialize_btrace (void);
3423void
3424_initialize_btrace (void)
3425{
3426 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3427 _("Info about branch tracing data."), &maintenanceinfolist);
3428
3429 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3430 _("Branch tracing maintenance commands."),
3431 &maint_btrace_cmdlist, "maintenance btrace ",
3432 0, &maintenancelist);
3433
3434 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3435Set branch tracing specific variables."),
3436 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3437 0, &maintenance_set_cmdlist);
3438
3439 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3440Set Intel Processor Trace specific variables."),
3441 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3442 0, &maint_btrace_set_cmdlist);
3443
3444 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3445Show branch tracing specific variables."),
3446 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3447 0, &maintenance_show_cmdlist);
3448
3449 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3450Show Intel Processor Trace specific variables."),
3451 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3452 0, &maint_btrace_show_cmdlist);
3453
3454 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3455 &maint_btrace_pt_skip_pad, _("\
3456Set whether PAD packets should be skipped in the btrace packet history."), _("\
3457Show whether PAD packets should be skipped in the btrace packet history."),_("\
3458When enabled, PAD packets are ignored in the btrace packet history."),
3459 NULL, show_maint_btrace_pt_skip_pad,
3460 &maint_btrace_pt_set_cmdlist,
3461 &maint_btrace_pt_show_cmdlist);
3462
3463 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3464 _("Print the raw branch tracing data.\n\
3465With no argument, print ten more packets after the previous ten-line print.\n\
3466With '-' as argument, print ten packets before the previous ten-line print.\n\
3467One argument specifies the starting packet of a ten-line print.\n\
3468Two arguments separated by a comma specify the starting and ending packets \
3469to print.\n\
3470When preceded by '+' or '-', the second argument specifies the distance \
3471from the first.\n"),
3472 &maint_btrace_cmdlist);
3473
3474 add_cmd ("clear-packet-history", class_maintenance,
3475 maint_btrace_clear_packet_history_cmd,
3476 _("Clears the branch tracing packet history.\n\
3477Discards the raw branch tracing data but not the execution history data.\n\
3478"),
3479 &maint_btrace_cmdlist);
3480
3481 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3482 _("Clears the branch tracing data.\n\
3483Discards the raw branch tracing data and the execution history data.\n\
3484The next 'record' command will fetch the branch tracing data anew.\n\
3485"),
3486 &maint_btrace_cmdlist);
3487
3488}