1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
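
/* A minimal sketch (illustration only, not part of this file) of why the
   do ... while (0) wrapper above matters.  Without it, DEBUG would expand
   to a bare block, and the ';' the caller writes after DEBUG (...) would
   terminate the if statement and detach any else branch.  */

#if 0 /* Example only.  */
static void
debug_idiom_example (int cond)
{
  if (cond)
    DEBUG ("tracing enabled");   /* Expands to a single statement, so  */
  else                           /* this else still binds to the if.   */
    DEBUG ("tracing disabled");
}
#endif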
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
71
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
88 return MSYMBOL_PRINT_NAME (msym);
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
143
144 /* Return the number of instructions in a given function call segment. */
145
146 static unsigned int
147 ftrace_call_num_insn (const struct btrace_function *bfun)
148 {
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return VEC_length (btrace_insn_s, bfun->insn);
157 }
158
159 /* Return non-zero if BFUN does not match MFUN and FUN,
160 return zero otherwise. */
161
162 static int
163 ftrace_function_switched (const struct btrace_function *bfun,
164 const struct minimal_symbol *mfun,
165 const struct symbol *fun)
166 {
167 struct minimal_symbol *msym;
168 struct symbol *sym;
169
170 msym = bfun->msym;
171 sym = bfun->sym;
172
173 /* If the minimal symbol changed, we certainly switched functions. */
174 if (mfun != NULL && msym != NULL
175 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
176 return 1;
177
178 /* If the symbol changed, we certainly switched functions. */
179 if (fun != NULL && sym != NULL)
180 {
181 const char *bfname, *fname;
182
183 /* Check the function name. */
184 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
185 return 1;
186
187 /* Check the location of those functions, as well. */
188 bfname = symtab_to_fullname (symbol_symtab (sym));
189 fname = symtab_to_fullname (symbol_symtab (fun));
190 if (filename_cmp (fname, bfname) != 0)
191 return 1;
192 }
193
194 /* If we lost symbol information, we switched functions. */
195 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
196 return 1;
197
198 /* If we gained symbol information, we switched functions. */
199 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
200 return 1;
201
202 return 0;
203 }
204
205 /* Allocate and initialize a new branch trace function segment.
206 BTINFO is the branch trace information for the current thread.
207 PREV is the chronologically preceding function segment.
208 MFUN and FUN are the symbol information we have for this function. */
209
210 static struct btrace_function *
211 ftrace_new_function (struct btrace_thread_info *btinfo,
212 struct btrace_function *prev,
213 struct minimal_symbol *mfun,
214 struct symbol *fun)
215 {
216 struct btrace_function *bfun;
217
218 bfun = XCNEW (struct btrace_function);
219
220 bfun->msym = mfun;
221 bfun->sym = fun;
222 bfun->flow.prev = prev;
223
224 if (prev == NULL)
225 {
226 /* Start counting at one. */
227 bfun->number = 1;
228 bfun->insn_offset = 1;
229 }
230 else
231 {
232 gdb_assert (prev->flow.next == NULL);
233 prev->flow.next = bfun;
234
235 bfun->number = prev->number + 1;
236 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
237 bfun->level = prev->level;
238 }
239
240 btinfo->functions.push_back (bfun);
241 return bfun;
242 }
243
244 /* Update the UP field of a function segment. */
245
246 static void
247 ftrace_update_caller (struct btrace_function *bfun,
248 struct btrace_function *caller,
249 enum btrace_function_flag flags)
250 {
251 if (bfun->up != NULL)
252 ftrace_debug (bfun, "updating caller");
253
254 bfun->up = caller;
255 bfun->flags = flags;
256
257 ftrace_debug (bfun, "set caller");
258 ftrace_debug (caller, "..to");
259 }
260
261 /* Fix up the caller for all segments of a function. */
262
263 static void
264 ftrace_fixup_caller (struct btrace_function *bfun,
265 struct btrace_function *caller,
266 enum btrace_function_flag flags)
267 {
268 struct btrace_function *prev, *next;
269
270 ftrace_update_caller (bfun, caller, flags);
271
272 /* Update all function segments belonging to the same function. */
273 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
274 ftrace_update_caller (prev, caller, flags);
275
276 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
277 ftrace_update_caller (next, caller, flags);
278 }
279
280 /* Add a new function segment for a call.
281 BTINFO is the branch trace information for the current thread.
282 CALLER is the chronologically preceding function segment.
283 MFUN and FUN are the symbol information we have for this function. */
284
285 static struct btrace_function *
286 ftrace_new_call (struct btrace_thread_info *btinfo,
287 struct btrace_function *caller,
288 struct minimal_symbol *mfun,
289 struct symbol *fun)
290 {
291 struct btrace_function *bfun;
292
293 bfun = ftrace_new_function (btinfo, caller, mfun, fun);
294 bfun->up = caller;
295 bfun->level += 1;
296
297 ftrace_debug (bfun, "new call");
298
299 return bfun;
300 }
301
302 /* Add a new function segment for a tail call.
303 BTINFO is the branch trace information for the current thread.
304 CALLER is the chronologically preceding function segment.
305 MFUN and FUN are the symbol information we have for this function. */
306
307 static struct btrace_function *
308 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
309 struct btrace_function *caller,
310 struct minimal_symbol *mfun,
311 struct symbol *fun)
312 {
313 struct btrace_function *bfun;
314
315 bfun = ftrace_new_function (btinfo, caller, mfun, fun);
316 bfun->up = caller;
317 bfun->level += 1;
318 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
319
320 ftrace_debug (bfun, "new tail call");
321
322 return bfun;
323 }
324
325 /* Return the caller of BFUN or NULL if there is none. This function skips
326 tail calls in the call chain. */
327 static struct btrace_function *
328 ftrace_get_caller (struct btrace_function *bfun)
329 {
330 for (; bfun != NULL; bfun = bfun->up)
331 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
332 return bfun->up;
333
334 return NULL;
335 }
336
337 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
338 symbol information. */
339
340 static struct btrace_function *
341 ftrace_find_caller (struct btrace_function *bfun,
342 struct minimal_symbol *mfun,
343 struct symbol *fun)
344 {
345 for (; bfun != NULL; bfun = bfun->up)
346 {
347 /* Skip functions with incompatible symbol information. */
348 if (ftrace_function_switched (bfun, mfun, fun))
349 continue;
350
351 /* This is the function segment we're looking for. */
352 break;
353 }
354
355 return bfun;
356 }
357
358 /* Find the innermost caller in the back trace of BFUN, skipping all
359 function segments that do not end with a call instruction (e.g.
360 tail calls ending with a jump). */
361
362 static struct btrace_function *
363 ftrace_find_call (struct btrace_function *bfun)
364 {
365 for (; bfun != NULL; bfun = bfun->up)
366 {
367 struct btrace_insn *last;
368
369 /* Skip gaps. */
370 if (bfun->errcode != 0)
371 continue;
372
373 last = VEC_last (btrace_insn_s, bfun->insn);
374
375 if (last->iclass == BTRACE_INSN_CALL)
376 break;
377 }
378
379 return bfun;
380 }
381
382 /* Add a continuation segment for a function into which we return.
383 BTINFO is the branch trace information for the current thread.
384 PREV is the chronologically preceding function segment.
385 MFUN and FUN are the symbol information we have for this function. */
386
387 static struct btrace_function *
388 ftrace_new_return (struct btrace_thread_info *btinfo,
389 struct btrace_function *prev,
390 struct minimal_symbol *mfun,
391 struct symbol *fun)
392 {
393 struct btrace_function *bfun, *caller;
394
395 bfun = ftrace_new_function (btinfo, prev, mfun, fun);
396
397 /* It is important to start at PREV's caller. Otherwise, we might find
398 PREV itself, if PREV is a recursive function. */
399 caller = ftrace_find_caller (prev->up, mfun, fun);
400 if (caller != NULL)
401 {
402 /* The caller of PREV is the preceding btrace function segment in this
403 function instance. */
404 gdb_assert (caller->segment.next == NULL);
405
406 caller->segment.next = bfun;
407 bfun->segment.prev = caller;
408
409 /* Maintain the function level. */
410 bfun->level = caller->level;
411
412 /* Maintain the call stack. */
413 bfun->up = caller->up;
414 bfun->flags = caller->flags;
415
416 ftrace_debug (bfun, "new return");
417 }
418 else
419 {
420 /* We did not find a caller. This could mean that something went
421 wrong or that the call is simply not included in the trace. */
422
423 /* Let's search for some actual call. */
424 caller = ftrace_find_call (prev->up);
425 if (caller == NULL)
426 {
427 /* There is no call in PREV's back trace. We assume that the
428 branch trace did not include it. */
429
430 /* Let's find the topmost function and add a new caller for it.
431 This should handle a series of initial tail calls. */
432 while (prev->up != NULL)
433 prev = prev->up;
434
435 bfun->level = prev->level - 1;
436
437 /* Fix up the call stack for PREV. */
438 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
439
440 ftrace_debug (bfun, "new return - no caller");
441 }
442 else
443 {
444 /* There is a call in PREV's back trace to which we should have
445 returned but didn't. Let's start a new, separate back trace
446 from PREV's level. */
447 bfun->level = prev->level - 1;
448
449 /* We fix up the back trace for PREV but leave other function segments
450 on the same level as they are.
451 This should handle things like schedule () correctly where we're
452 switching contexts. */
453 prev->up = bfun;
454 prev->flags = BFUN_UP_LINKS_TO_RET;
455
456 ftrace_debug (bfun, "new return - unknown caller");
457 }
458 }
459
460 return bfun;
461 }
462
463 /* Add a new function segment for a function switch.
464 BTINFO is the branch trace information for the current thread.
465 PREV is the chronologically preceding function segment.
466 MFUN and FUN are the symbol information we have for this function. */
467
468 static struct btrace_function *
469 ftrace_new_switch (struct btrace_thread_info *btinfo,
470 struct btrace_function *prev,
471 struct minimal_symbol *mfun,
472 struct symbol *fun)
473 {
474 struct btrace_function *bfun;
475
476 /* This is an unexplained function switch. We can't really be sure about the
477 call stack; the best we can do for now is to preserve it. */
478 bfun = ftrace_new_function (btinfo, prev, mfun, fun);
479 bfun->up = prev->up;
480 bfun->flags = prev->flags;
481
482 ftrace_debug (bfun, "new switch");
483
484 return bfun;
485 }
486
487 /* Add a new function segment for a gap in the trace due to a decode error.
488 BTINFO is the branch trace information for the current thread.
489 PREV is the chronologically preceding function segment.
490 ERRCODE is the format-specific error code. */
491
492 static struct btrace_function *
493 ftrace_new_gap (struct btrace_thread_info *btinfo,
494 struct btrace_function *prev, int errcode)
495 {
496 struct btrace_function *bfun;
497
498 /* We hijack PREV if it was empty. */
499 if (prev != NULL && prev->errcode == 0
500 && VEC_empty (btrace_insn_s, prev->insn))
501 bfun = prev;
502 else
503 bfun = ftrace_new_function (btinfo, prev, NULL, NULL);
504
505 bfun->errcode = errcode;
506
507 ftrace_debug (bfun, "new gap");
508
509 return bfun;
510 }
511
512 /* Update BFUN with respect to the instruction at PC. BTINFO is the branch
513 trace information for the current thread. This may create new function
514 segments.
515 Return the chronologically latest function segment, never NULL. */
516
517 static struct btrace_function *
518 ftrace_update_function (struct btrace_thread_info *btinfo,
519 struct btrace_function *bfun, CORE_ADDR pc)
520 {
521 struct bound_minimal_symbol bmfun;
522 struct minimal_symbol *mfun;
523 struct symbol *fun;
524 struct btrace_insn *last;
525
526 /* Try to determine the function we're in. We use both types of symbols
527 to avoid surprises when we sometimes get a full symbol and sometimes
528 only a minimal symbol. */
529 fun = find_pc_function (pc);
530 bmfun = lookup_minimal_symbol_by_pc (pc);
531 mfun = bmfun.minsym;
532
533 if (fun == NULL && mfun == NULL)
534 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
535
536 /* If we didn't have a function or if we had a gap before, we create one. */
537 if (bfun == NULL || bfun->errcode != 0)
538 return ftrace_new_function (btinfo, bfun, mfun, fun);
539
540 /* Check the last instruction, if we have one.
541 We do this check first, since it allows us to fill in the call stack
542 links in addition to the normal flow links. */
543 last = NULL;
544 if (!VEC_empty (btrace_insn_s, bfun->insn))
545 last = VEC_last (btrace_insn_s, bfun->insn);
546
547 if (last != NULL)
548 {
549 switch (last->iclass)
550 {
551 case BTRACE_INSN_RETURN:
552 {
553 const char *fname;
554
555 /* On some systems, _dl_runtime_resolve returns to the resolved
556 function instead of jumping to it. From our perspective,
557 however, this is a tailcall.
558 If we treated it as return, we wouldn't be able to find the
559 resolved function in our stack back trace. Hence, we would
560 lose the current stack back trace and start anew with an empty
561 back trace. When the resolved function returns, we would then
562 create a stack back trace with the same function names but
563 different frame id's. This will confuse stepping. */
564 fname = ftrace_print_function_name (bfun);
565 if (strcmp (fname, "_dl_runtime_resolve") == 0)
566 return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
567
568 return ftrace_new_return (btinfo, bfun, mfun, fun);
569 }
570
571 case BTRACE_INSN_CALL:
572 /* Ignore calls to the next instruction. They are used for PIC. */
573 if (last->pc + last->size == pc)
574 break;
575
576 return ftrace_new_call (btinfo, bfun, mfun, fun);
577
578 case BTRACE_INSN_JUMP:
579 {
580 CORE_ADDR start;
581
582 start = get_pc_function_start (pc);
583
584 /* A jump to the start of a function is (typically) a tail call. */
585 if (start == pc)
586 return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
587
588 /* If we can't determine the function for PC, we treat a jump at
589 the end of the block as tail call if we're switching functions
590 and as an intra-function branch if we don't. */
591 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
592 return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
593
594 break;
595 }
596 }
597 }
598
599 /* Check if we're switching functions for some other reason. */
600 if (ftrace_function_switched (bfun, mfun, fun))
601 {
602 DEBUG_FTRACE ("switching from %s in %s at %s",
603 ftrace_print_insn_addr (last),
604 ftrace_print_function_name (bfun),
605 ftrace_print_filename (bfun));
606
607 return ftrace_new_switch (btinfo, bfun, mfun, fun);
608 }
609
610 return bfun;
611 }
612
613 /* Add the instruction at PC to BFUN's instructions. */
614
615 static void
616 ftrace_update_insns (struct btrace_function *bfun,
617 const struct btrace_insn *insn)
618 {
619 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
620
621 if (record_debug > 1)
622 ftrace_debug (bfun, "update insn");
623 }
624
625 /* Classify the instruction at PC. */
626
627 static enum btrace_insn_class
628 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
629 {
630 enum btrace_insn_class iclass;
631
632 iclass = BTRACE_INSN_OTHER;
633 TRY
634 {
635 if (gdbarch_insn_is_call (gdbarch, pc))
636 iclass = BTRACE_INSN_CALL;
637 else if (gdbarch_insn_is_ret (gdbarch, pc))
638 iclass = BTRACE_INSN_RETURN;
639 else if (gdbarch_insn_is_jump (gdbarch, pc))
640 iclass = BTRACE_INSN_JUMP;
641 }
642 CATCH (error, RETURN_MASK_ERROR)
643 {
644 }
645 END_CATCH
646
647 return iclass;
648 }
649
650 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
651 number of matching function segments or zero if the back traces do not
652 match. */
653
654 static int
655 ftrace_match_backtrace (struct btrace_function *lhs,
656 struct btrace_function *rhs)
657 {
658 int matches;
659
660 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
661 {
662 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
663 return 0;
664
665 lhs = ftrace_get_caller (lhs);
666 rhs = ftrace_get_caller (rhs);
667 }
668
669 return matches;
670 }
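
/* For illustration (hypothetical segments, not real trace data): if the back
   trace of LHS is bar <- foo <- main and the back trace of RHS is also
   bar <- foo <- main, the loop above pairs (bar, bar), (foo, foo) and
   (main, main) and returns 3.  If any pair failed ftrace_function_switched,
   the result would be 0 instead.  */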
671
672 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
673
674 static void
675 ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
676 {
677 if (adjustment == 0)
678 return;
679
680 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
681 ftrace_debug (bfun, "..bfun");
682
683 for (; bfun != NULL; bfun = bfun->flow.next)
684 bfun->level += adjustment;
685 }
686
687 /* Recompute the global level offset. Traverse the function trace and compute
688 the global level offset as the negative of the minimal function level. */
689
690 static void
691 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
692 {
693 struct btrace_function *bfun, *end;
694 int level;
695
696 if (btinfo == NULL)
697 return;
698
699 bfun = btinfo->begin;
700 if (bfun == NULL)
701 return;
702
703 /* The last function segment contains the current instruction, which is not
704 really part of the trace. If it contains just this one instruction, we
705 stop when we reach it; otherwise, we let the below loop run to the end. */
706 end = btinfo->end;
707 if (VEC_length (btrace_insn_s, end->insn) > 1)
708 end = NULL;
709
710 level = INT_MAX;
711 for (; bfun != end; bfun = bfun->flow.next)
712 level = std::min (level, bfun->level);
713
714 DEBUG_FTRACE ("setting global level offset: %d", -level);
715 btinfo->level = -level;
716 }
717
718 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
719 ftrace_connect_backtrace. */
720
721 static void
722 ftrace_connect_bfun (struct btrace_function *prev,
723 struct btrace_function *next)
724 {
725 DEBUG_FTRACE ("connecting...");
726 ftrace_debug (prev, "..prev");
727 ftrace_debug (next, "..next");
728
729 /* The function segments are not yet connected. */
730 gdb_assert (prev->segment.next == NULL);
731 gdb_assert (next->segment.prev == NULL);
732
733 prev->segment.next = next;
734 next->segment.prev = prev;
735
736 /* We may have moved NEXT to a different function level. */
737 ftrace_fixup_level (next, prev->level - next->level);
738
739 /* If we run out of back trace for one, let's use the other's. */
740 if (prev->up == NULL)
741 {
742 if (next->up != NULL)
743 {
744 DEBUG_FTRACE ("using next's callers");
745 ftrace_fixup_caller (prev, next->up, next->flags);
746 }
747 }
748 else if (next->up == NULL)
749 {
750 if (prev->up != NULL)
751 {
752 DEBUG_FTRACE ("using prev's callers");
753 ftrace_fixup_caller (next, prev->up, prev->flags);
754 }
755 }
756 else
757 {
758 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
759 link to add the tail callers to NEXT's back trace.
760
761 This removes NEXT->UP from NEXT's back trace. It will be added back
762 when connecting NEXT and PREV's callers - provided they exist.
763
764 If PREV's back trace consists of a series of tail calls without an
765 actual call, there will be no further connection and NEXT's caller will
766 be removed for good. To catch this case, we handle it here and connect
767 the top of PREV's back trace to NEXT's caller. */
768 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
769 {
770 struct btrace_function *caller;
771 btrace_function_flags flags;
772
773 /* We checked NEXT->UP above so CALLER can't be NULL. */
774 caller = next->up;
775 flags = next->flags;
776
777 DEBUG_FTRACE ("adding prev's tail calls to next");
778
779 ftrace_fixup_caller (next, prev->up, prev->flags);
780
781 for (prev = prev->up; prev != NULL; prev = prev->up)
782 {
783 /* At the end of PREV's back trace, continue with CALLER. */
784 if (prev->up == NULL)
785 {
786 DEBUG_FTRACE ("fixing up link for tailcall chain");
787 ftrace_debug (prev, "..top");
788 ftrace_debug (caller, "..up");
789
790 ftrace_fixup_caller (prev, caller, flags);
791
792 /* If we skipped any tail calls, this may move CALLER to a
793 different function level.
794
795 Note that changing CALLER's level is only OK because we
796 know that this is the last iteration of the bottom-to-top
797 walk in ftrace_connect_backtrace.
798
799 Otherwise we will fix up CALLER's level when we connect it
800 to PREV's caller in the next iteration. */
801 ftrace_fixup_level (caller, prev->level - caller->level - 1);
802 break;
803 }
804
805 /* There's nothing to do if we find a real call. */
806 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
807 {
808 DEBUG_FTRACE ("will fix up link in next iteration");
809 break;
810 }
811 }
812 }
813 }
814 }
815
816 /* Connect function segments on the same level in the back trace at LHS and RHS.
817 The back traces at LHS and RHS are expected to match according to
818 ftrace_match_backtrace. */
819
820 static void
821 ftrace_connect_backtrace (struct btrace_function *lhs,
822 struct btrace_function *rhs)
823 {
824 while (lhs != NULL && rhs != NULL)
825 {
826 struct btrace_function *prev, *next;
827
828 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
829
830 /* Connecting LHS and RHS may change the up link. */
831 prev = lhs;
832 next = rhs;
833
834 lhs = ftrace_get_caller (lhs);
835 rhs = ftrace_get_caller (rhs);
836
837 ftrace_connect_bfun (prev, next);
838 }
839 }
840
841 /* Bridge the gap between two function segments left and right of a gap if their
842 respective back traces match in at least MIN_MATCHES functions.
843
844 Returns non-zero if the gap could be bridged, zero otherwise. */
845
846 static int
847 ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
848 int min_matches)
849 {
850 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
851 int best_matches;
852
853 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
854 rhs->insn_offset - 1, min_matches);
855
856 best_matches = 0;
857 best_l = NULL;
858 best_r = NULL;
859
860 /* We search the back traces of LHS and RHS for valid connections and connect
861 the two function segments that give the longest combined back trace. */
862
863 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
864 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
865 {
866 int matches;
867
868 matches = ftrace_match_backtrace (cand_l, cand_r);
869 if (best_matches < matches)
870 {
871 best_matches = matches;
872 best_l = cand_l;
873 best_r = cand_r;
874 }
875 }
876
877 /* We need at least MIN_MATCHES matches. */
878 gdb_assert (min_matches > 0);
879 if (best_matches < min_matches)
880 return 0;
881
882 DEBUG_FTRACE ("..matches: %d", best_matches);
883
884 /* We will fix up the level of BEST_R and succeeding function segments such
885 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
886
887 This will ignore the level of RHS and following if BEST_R != RHS, i.e. if
888 BEST_R is a successor of RHS in the back trace of RHS.
889
890 To catch this, we already fix up the level here where we can start at RHS
891 instead of at BEST_R. We will ignore the level fixup when connecting
892 BEST_L to BEST_R as they will already be on the same level. */
893 ftrace_fixup_level (rhs, best_l->level - best_r->level);
894
895 ftrace_connect_backtrace (best_l, best_r);
896
897 return best_matches;
898 }
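
/* A worked example for the level fixup above, with made-up numbers: if
   BEST_L sits at level -1 and BEST_R at level 2, the adjustment is
   -1 - 2 = -3, so BEST_R and all following segments are shifted down by
   three levels before the back traces are connected.  Starting the fixup
   at RHS instead of BEST_R also corrects the segments between RHS and
   BEST_R.  */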
899
900 /* Try to bridge gaps due to overflow or decode errors by connecting the
901 function segments that are separated by the gap. */
902
903 static void
904 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
905 {
906 VEC (bfun_s) *remaining;
907 struct cleanup *old_chain;
908 int min_matches;
909
910 DEBUG ("bridge gaps");
911
912 remaining = NULL;
913 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
914
915 /* We require a minimum number of matches for bridging a gap. The number of
916 required matches will be lowered with each iteration.
917
918 The more matches the higher our confidence that the bridging is correct.
919 For big gaps or small traces, however, it may not be feasible to require a
920 high number of matches. */
921 for (min_matches = 5; min_matches > 0; --min_matches)
922 {
923 /* Let's try to bridge as many gaps as we can. In some cases, we need to
924 skip a gap and revisit it again after we closed later gaps. */
925 while (!VEC_empty (bfun_s, *gaps))
926 {
927 struct btrace_function *gap;
928 unsigned int idx;
929
930 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
931 {
932 struct btrace_function *lhs, *rhs;
933 int bridged;
934
935 /* We may have a sequence of gaps if we run from one error into
936 the next as we try to re-sync onto the trace stream. Ignore
937 all but the leftmost gap in such a sequence.
938
939 Also ignore gaps at the beginning of the trace. */
940 lhs = gap->flow.prev;
941 if (lhs == NULL || lhs->errcode != 0)
942 continue;
943
944 /* Skip gaps to the right. */
945 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
946 if (rhs->errcode == 0)
947 break;
948
949 /* Ignore gaps at the end of the trace. */
950 if (rhs == NULL)
951 continue;
952
953 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
954
955 /* Keep track of gaps we were not able to bridge and try again.
956 If we just pushed them to the end of GAPS we would risk an
957 infinite loop in case we simply cannot bridge a gap. */
958 if (bridged == 0)
959 VEC_safe_push (bfun_s, remaining, gap);
960 }
961
962 /* Let's see if we made any progress. */
963 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
964 break;
965
966 VEC_free (bfun_s, *gaps);
967
968 *gaps = remaining;
969 remaining = NULL;
970 }
971
972 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
973 if (VEC_empty (bfun_s, *gaps))
974 break;
975
976 VEC_free (bfun_s, remaining);
977 }
978
979 do_cleanups (old_chain);
980
981 /* We may omit this in some cases. Not sure it is worth the extra
982 complication, though. */
983 ftrace_compute_global_level_offset (&tp->btrace);
984 }
985
986 /* Compute the function branch trace from BTS trace. */
987
988 static void
989 btrace_compute_ftrace_bts (struct thread_info *tp,
990 const struct btrace_data_bts *btrace,
991 VEC (bfun_s) **gaps)
992 {
993 struct btrace_thread_info *btinfo;
994 struct btrace_function *begin, *end;
995 struct gdbarch *gdbarch;
996 unsigned int blk;
997 int level;
998
999 gdbarch = target_gdbarch ();
1000 btinfo = &tp->btrace;
1001 begin = btinfo->begin;
1002 end = btinfo->end;
1003 level = begin != NULL ? -btinfo->level : INT_MAX;
1004 blk = VEC_length (btrace_block_s, btrace->blocks);
1005
1006 while (blk != 0)
1007 {
1008 btrace_block_s *block;
1009 CORE_ADDR pc;
1010
1011 blk -= 1;
1012
1013 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1014 pc = block->begin;
1015
1016 for (;;)
1017 {
1018 struct btrace_insn insn;
1019 int size;
1020
1021 /* We should hit the end of the block. Warn if we went too far. */
1022 if (block->end < pc)
1023 {
1024 /* Indicate the gap in the trace. */
1025 end = ftrace_new_gap (btinfo, end, BDE_BTS_OVERFLOW);
1026 if (begin == NULL)
1027 begin = end;
1028
1029 VEC_safe_push (bfun_s, *gaps, end);
1030
1031 warning (_("Recorded trace may be corrupted at instruction "
1032 "%u (pc = %s)."), end->insn_offset - 1,
1033 core_addr_to_string_nz (pc));
1034
1035 break;
1036 }
1037
1038 end = ftrace_update_function (btinfo, end, pc);
1039 if (begin == NULL)
1040 begin = end;
1041
1042 /* Maintain the function level offset.
1043 For all but the last block, we do it here. */
1044 if (blk != 0)
1045 level = std::min (level, end->level);
1046
1047 size = 0;
1048 TRY
1049 {
1050 size = gdb_insn_length (gdbarch, pc);
1051 }
1052 CATCH (error, RETURN_MASK_ERROR)
1053 {
1054 }
1055 END_CATCH
1056
1057 insn.pc = pc;
1058 insn.size = size;
1059 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1060 insn.flags = 0;
1061
1062 ftrace_update_insns (end, &insn);
1063
1064 /* We're done once we pushed the instruction at the end. */
1065 if (block->end == pc)
1066 break;
1067
1068 /* We can't continue if we fail to compute the size. */
1069 if (size <= 0)
1070 {
1071 /* Indicate the gap in the trace. We just added INSN so we're
1072 not at the beginning. */
1073 end = ftrace_new_gap (btinfo, end, BDE_BTS_INSN_SIZE);
1074
1075 VEC_safe_push (bfun_s, *gaps, end);
1076
1077 warning (_("Recorded trace may be incomplete at instruction %u "
1078 "(pc = %s)."), end->insn_offset - 1,
1079 core_addr_to_string_nz (pc));
1080
1081 break;
1082 }
1083
1084 pc += size;
1085
1086 /* Maintain the function level offset.
1087 For the last block, we do it here to not consider the last
1088 instruction.
1089 Since the last instruction corresponds to the current instruction
1090 and is not really part of the execution history, it shouldn't
1091 affect the level. */
1092 if (blk == 0)
1093 level = std::min (level, end->level);
1094 }
1095 }
1096
1097 btinfo->begin = begin;
1098 btinfo->end = end;
1099
1100 /* LEVEL is the minimal function level of all btrace function segments.
1101 Define the global level offset to -LEVEL so all function levels are
1102 normalized to start at zero. */
1103 btinfo->level = -level;
1104 }
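
/* A small numeric example for the normalization above, with made-up levels:
   if the function segments ended up at levels -2, -1 and 0, then LEVEL is -2
   and BTINFO->LEVEL becomes 2, so applying the offset yields user-visible
   levels 0, 1 and 2.  */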
1105
1106 #if defined (HAVE_LIBIPT)
1107
1108 static enum btrace_insn_class
1109 pt_reclassify_insn (enum pt_insn_class iclass)
1110 {
1111 switch (iclass)
1112 {
1113 case ptic_call:
1114 return BTRACE_INSN_CALL;
1115
1116 case ptic_return:
1117 return BTRACE_INSN_RETURN;
1118
1119 case ptic_jump:
1120 return BTRACE_INSN_JUMP;
1121
1122 default:
1123 return BTRACE_INSN_OTHER;
1124 }
1125 }
1126
1127 /* Return the btrace instruction flags for INSN. */
1128
1129 static btrace_insn_flags
1130 pt_btrace_insn_flags (const struct pt_insn &insn)
1131 {
1132 btrace_insn_flags flags = 0;
1133
1134 if (insn.speculative)
1135 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1136
1137 return flags;
1138 }
1139
1140 /* Return the btrace instruction for INSN. */
1141
1142 static btrace_insn
1143 pt_btrace_insn (const struct pt_insn &insn)
1144 {
1145 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1146 pt_reclassify_insn (insn.iclass),
1147 pt_btrace_insn_flags (insn)};
1148 }
1149
1150
1151 /* Add function branch trace to BTINFO using DECODER. */
1152
1153 static void
1154 ftrace_add_pt (struct btrace_thread_info *btinfo,
1155 struct pt_insn_decoder *decoder,
1156 struct btrace_function **pbegin,
1157 struct btrace_function **pend, int *plevel,
1158 VEC (bfun_s) **gaps)
1159 {
1160 struct btrace_function *begin, *end, *upd;
1161 uint64_t offset;
1162 int errcode;
1163
1164 begin = *pbegin;
1165 end = *pend;
1166 for (;;)
1167 {
1168 struct pt_insn insn;
1169
1170 errcode = pt_insn_sync_forward (decoder);
1171 if (errcode < 0)
1172 {
1173 if (errcode != -pte_eos)
1174 warning (_("Failed to synchronize onto the Intel Processor "
1175 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1176 break;
1177 }
1178
1179 for (;;)
1180 {
1181 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
1182 if (errcode < 0)
1183 break;
1184
1185 /* Look for gaps in the trace - unless we're at the beginning. */
1186 if (begin != NULL)
1187 {
1188 /* Tracing is disabled and re-enabled each time we enter the
1189 kernel. Most times, we continue from the same instruction at
1190 which we stopped before. This is indicated via the RESUMED instruction
1191 flag. The ENABLED instruction flag means that we continued
1192 from some other instruction. Indicate this as a trace gap. */
1193 if (insn.enabled)
1194 {
1195 *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_DISABLED);
1196
1197 VEC_safe_push (bfun_s, *gaps, end);
1198
1199 pt_insn_get_offset (decoder, &offset);
1200
1201 warning (_("Non-contiguous trace at instruction %u (offset "
1202 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1203 end->insn_offset - 1, offset, insn.ip);
1204 }
1205 }
1206
1207 /* Indicate trace overflows. */
1208 if (insn.resynced)
1209 {
1210 *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_OVERFLOW);
1211 if (begin == NULL)
1212 *pbegin = begin = end;
1213
1214 VEC_safe_push (bfun_s, *gaps, end);
1215
1216 pt_insn_get_offset (decoder, &offset);
1217
1218 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1219 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1220 offset, insn.ip);
1221 }
1222
1223 upd = ftrace_update_function (btinfo, end, insn.ip);
1224 if (upd != end)
1225 {
1226 *pend = end = upd;
1227
1228 if (begin == NULL)
1229 *pbegin = begin = upd;
1230 }
1231
1232 /* Maintain the function level offset. */
1233 *plevel = std::min (*plevel, end->level);
1234
1235 btrace_insn btinsn = pt_btrace_insn (insn);
1236 ftrace_update_insns (end, &btinsn);
1237 }
1238
1239 if (errcode == -pte_eos)
1240 break;
1241
1242 /* Indicate the gap in the trace. */
1243 *pend = end = ftrace_new_gap (btinfo, end, errcode);
1244 if (begin == NULL)
1245 *pbegin = begin = end;
1246
1247 VEC_safe_push (bfun_s, *gaps, end);
1248
1249 pt_insn_get_offset (decoder, &offset);
1250
1251 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1252 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1253 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1254 }
1255 }
1256
1257 /* A callback function to allow the trace decoder to read the inferior's
1258 memory. */
1259
1260 static int
1261 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1262 const struct pt_asid *asid, uint64_t pc,
1263 void *context)
1264 {
1265 int result, errcode;
1266
1267 result = (int) size;
1268 TRY
1269 {
1270 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1271 if (errcode != 0)
1272 result = -pte_nomap;
1273 }
1274 CATCH (error, RETURN_MASK_ERROR)
1275 {
1276 result = -pte_nomap;
1277 }
1278 END_CATCH
1279
1280 return result;
1281 }
1282
1283 /* Translate the vendor from one enum to another. */
1284
1285 static enum pt_cpu_vendor
1286 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1287 {
1288 switch (vendor)
1289 {
1290 default:
1291 return pcv_unknown;
1292
1293 case CV_INTEL:
1294 return pcv_intel;
1295 }
1296 }
1297
1298 /* Finalize the function branch trace after decode. */
1299
1300 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1301 struct thread_info *tp, int level)
1302 {
1303 pt_insn_free_decoder (decoder);
1304
1305 /* LEVEL is the minimal function level of all btrace function segments.
1306 Define the global level offset to -LEVEL so all function levels are
1307 normalized to start at zero. */
1308 tp->btrace.level = -level;
1309
1310 /* Add a single last instruction entry for the current PC.
1311 This allows us to compute the backtrace at the current PC using both
1312 standard unwind and btrace unwind.
1313 This extra entry is ignored by all record commands. */
1314 btrace_add_pc (tp);
1315 }
1316
1317 /* Compute the function branch trace from Intel Processor Trace
1318 format. */
1319
1320 static void
1321 btrace_compute_ftrace_pt (struct thread_info *tp,
1322 const struct btrace_data_pt *btrace,
1323 VEC (bfun_s) **gaps)
1324 {
1325 struct btrace_thread_info *btinfo;
1326 struct pt_insn_decoder *decoder;
1327 struct pt_config config;
1328 int level, errcode;
1329
1330 if (btrace->size == 0)
1331 return;
1332
1333 btinfo = &tp->btrace;
1334 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1335
1336 pt_config_init(&config);
1337 config.begin = btrace->data;
1338 config.end = btrace->data + btrace->size;
1339
1340 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1341 config.cpu.family = btrace->config.cpu.family;
1342 config.cpu.model = btrace->config.cpu.model;
1343 config.cpu.stepping = btrace->config.cpu.stepping;
1344
1345 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1346 if (errcode < 0)
1347 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1348 pt_errstr (pt_errcode (errcode)));
1349
1350 decoder = pt_insn_alloc_decoder (&config);
1351 if (decoder == NULL)
1352 error (_("Failed to allocate the Intel Processor Trace decoder."));
1353
1354 TRY
1355 {
1356 struct pt_image *image;
1357
1358 image = pt_insn_get_image(decoder);
1359 if (image == NULL)
1360 error (_("Failed to configure the Intel Processor Trace decoder."));
1361
1362 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1363 if (errcode < 0)
1364 error (_("Failed to configure the Intel Processor Trace decoder: "
1365 "%s."), pt_errstr (pt_errcode (errcode)));
1366
1367 ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
1368 gaps);
1369 }
1370 CATCH (error, RETURN_MASK_ALL)
1371 {
1372 /* Indicate a gap in the trace if we quit trace processing. */
1373 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1374 {
1375 btinfo->end = ftrace_new_gap (btinfo, btinfo->end, BDE_PT_USER_QUIT);
1376
1377 VEC_safe_push (bfun_s, *gaps, btinfo->end);
1378 }
1379
1380 btrace_finalize_ftrace_pt (decoder, tp, level);
1381
1382 throw_exception (error);
1383 }
1384 END_CATCH
1385
1386 btrace_finalize_ftrace_pt (decoder, tp, level);
1387 }
1388
1389 #else /* defined (HAVE_LIBIPT) */
1390
1391 static void
1392 btrace_compute_ftrace_pt (struct thread_info *tp,
1393 const struct btrace_data_pt *btrace,
1394 VEC (bfun_s) **gaps)
1395 {
1396 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1397 }
1398
1399 #endif /* defined (HAVE_LIBIPT) */
1400
1401 /* Compute the function branch trace from a block branch trace BTRACE for
1402 a thread given by BTINFO. */
1403
1404 static void
1405 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1406 VEC (bfun_s) **gaps)
1407 {
1408 DEBUG ("compute ftrace");
1409
1410 switch (btrace->format)
1411 {
1412 case BTRACE_FORMAT_NONE:
1413 return;
1414
1415 case BTRACE_FORMAT_BTS:
1416 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1417 return;
1418
1419 case BTRACE_FORMAT_PT:
1420 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1421 return;
1422 }
1423
1424 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1425 }
1426
1427 static void
1428 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1429 {
1430 if (!VEC_empty (bfun_s, *gaps))
1431 {
1432 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1433 btrace_bridge_gaps (tp, gaps);
1434 }
1435 }
1436
1437 static void
1438 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1439 {
1440 VEC (bfun_s) *gaps;
1441 struct cleanup *old_chain;
1442
1443 gaps = NULL;
1444 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1445
1446 TRY
1447 {
1448 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1449 }
1450 CATCH (error, RETURN_MASK_ALL)
1451 {
1452 btrace_finalize_ftrace (tp, &gaps);
1453
1454 throw_exception (error);
1455 }
1456 END_CATCH
1457
1458 btrace_finalize_ftrace (tp, &gaps);
1459
1460 do_cleanups (old_chain);
1461 }
1462
1463 /* Add an entry for the current PC. */
1464
1465 static void
1466 btrace_add_pc (struct thread_info *tp)
1467 {
1468 struct btrace_data btrace;
1469 struct btrace_block *block;
1470 struct regcache *regcache;
1471 struct cleanup *cleanup;
1472 CORE_ADDR pc;
1473
1474 regcache = get_thread_regcache (tp->ptid);
1475 pc = regcache_read_pc (regcache);
1476
1477 btrace_data_init (&btrace);
1478 btrace.format = BTRACE_FORMAT_BTS;
1479 btrace.variant.bts.blocks = NULL;
1480
1481 cleanup = make_cleanup_btrace_data (&btrace);
1482
1483 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1484 block->begin = pc;
1485 block->end = pc;
1486
1487 btrace_compute_ftrace (tp, &btrace);
1488
1489 do_cleanups (cleanup);
1490 }
1491
1492 /* See btrace.h. */
1493
1494 void
1495 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1496 {
1497 if (tp->btrace.target != NULL)
1498 return;
1499
1500 #if !defined (HAVE_LIBIPT)
1501 if (conf->format == BTRACE_FORMAT_PT)
1502 error (_("GDB does not support Intel Processor Trace."));
1503 #endif /* !defined (HAVE_LIBIPT) */
1504
1505 if (!target_supports_btrace (conf->format))
1506 error (_("Target does not support branch tracing."));
1507
1508 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1509 target_pid_to_str (tp->ptid));
1510
1511 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1512
1513 /* We're done if we failed to enable tracing. */
1514 if (tp->btrace.target == NULL)
1515 return;
1516
1517 /* We need to undo the enable in case of errors. */
1518 TRY
1519 {
1520 /* Add an entry for the current PC so we start tracing from where we
1521 enabled it.
1522
1523 If we can't access TP's registers, TP is most likely running. In this
1524 case, we can't really say where tracing was enabled so it should be
1525 safe to simply skip this step.
1526
1527 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1528 start at the PC at which tracing was enabled. */
1529 if (conf->format != BTRACE_FORMAT_PT
1530 && can_access_registers_ptid (tp->ptid))
1531 btrace_add_pc (tp);
1532 }
1533 CATCH (exception, RETURN_MASK_ALL)
1534 {
1535 btrace_disable (tp);
1536
1537 throw_exception (exception);
1538 }
1539 END_CATCH
1540 }
1541
1542 /* See btrace.h. */
1543
1544 const struct btrace_config *
1545 btrace_conf (const struct btrace_thread_info *btinfo)
1546 {
1547 if (btinfo->target == NULL)
1548 return NULL;
1549
1550 return target_btrace_conf (btinfo->target);
1551 }
1552
1553 /* See btrace.h. */
1554
1555 void
1556 btrace_disable (struct thread_info *tp)
1557 {
1558 struct btrace_thread_info *btp = &tp->btrace;
1559 int errcode = 0;
1560
1561 if (btp->target == NULL)
1562 return;
1563
1564 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1565 target_pid_to_str (tp->ptid));
1566
1567 target_disable_btrace (btp->target);
1568 btp->target = NULL;
1569
1570 btrace_clear (tp);
1571 }
1572
1573 /* See btrace.h. */
1574
1575 void
1576 btrace_teardown (struct thread_info *tp)
1577 {
1578 struct btrace_thread_info *btp = &tp->btrace;
1579 int errcode = 0;
1580
1581 if (btp->target == NULL)
1582 return;
1583
1584 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1585 target_pid_to_str (tp->ptid));
1586
1587 target_teardown_btrace (btp->target);
1588 btp->target = NULL;
1589
1590 btrace_clear (tp);
1591 }
1592
1593 /* Stitch branch trace in BTS format. */
1594
1595 static int
1596 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1597 {
1598 struct btrace_thread_info *btinfo;
1599 struct btrace_function *last_bfun;
1600 struct btrace_insn *last_insn;
1601 btrace_block_s *first_new_block;
1602
1603 btinfo = &tp->btrace;
1604 last_bfun = btinfo->end;
1605 gdb_assert (last_bfun != NULL);
1606 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1607
1608 /* If the existing trace ends with a gap, we just glue the traces
1609 together. We need to drop the last (i.e. chronologically first) block
1610 of the new trace, though, since we can't fill in the start address. */
1611 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1612 {
1613 VEC_pop (btrace_block_s, btrace->blocks);
1614 return 0;
1615 }
1616
1617 /* Beware that block trace starts with the most recent block, so the
1618 chronologically first block in the new trace is the last block in
1619 the new trace's block vector. */
1620 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1621 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1622
1623 /* If the current PC at the end of the block is the same as in our current
1624 trace, there are two explanations:
1625 1. we executed the instruction and some branch brought us back.
1626 2. we have not made any progress.
1627 In the first case, the delta trace vector should contain at least two
1628 entries.
1629 In the second case, the delta trace vector should contain exactly one
1630 entry for the partial block containing the current PC. Remove it. */
1631 if (first_new_block->end == last_insn->pc
1632 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1633 {
1634 VEC_pop (btrace_block_s, btrace->blocks);
1635 return 0;
1636 }
1637
1638 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1639 core_addr_to_string_nz (first_new_block->end));
1640
1641 /* Do a simple sanity check to make sure we don't accidentally end up
1642 with a bad block. This should not occur in practice. */
1643 if (first_new_block->end < last_insn->pc)
1644 {
1645 warning (_("Error while trying to read delta trace. Falling back to "
1646 "a full read."));
1647 return -1;
1648 }
1649
1650 /* We adjust the last block to start at the end of our current trace. */
1651 gdb_assert (first_new_block->begin == 0);
1652 first_new_block->begin = last_insn->pc;
1653
1654 /* We simply pop the last insn so we can insert it again as part of
1655 the normal branch trace computation.
1656 Since instruction iterators are based on indices in the instructions
1657 vector, we don't leave any pointers dangling. */
1658 DEBUG ("pruning insn at %s for stitching",
1659 ftrace_print_insn_addr (last_insn));
1660
1661 VEC_pop (btrace_insn_s, last_bfun->insn);
1662
1663 /* The instructions vector may become empty temporarily if this has
1664 been the only instruction in this function segment.
1665 This violates the invariant but will be remedied shortly by
1666 btrace_compute_ftrace when we add the new trace. */
1667
1668 /* The only case where this would hurt is if the entire trace consisted
1669 of just that one instruction. If we remove it, we might turn the now
1670 empty btrace function segment into a gap. But we don't want gaps at
1671 the beginning. To avoid this, we remove the entire old trace. */
1672 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1673 btrace_clear (tp);
1674
1675 return 0;
1676 }
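
/* For illustration, assuming a last recorded instruction at pc 0x1000: a
   delta trace consisting of the single block [begin = 0, end = 0x1000]
   means no progress was made and the block is dropped above.  A delta
   trace with more than one block indicates actual execution; its
   chronologically first block is adjusted to begin at 0x1000 and the
   instruction at 0x1000 is re-added during the normal trace computation.
   The addresses are made up; only the shape of the example matters.  */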
1677
1678 /* Adjust the block trace in order to stitch old and new trace together.
1679 BTRACE is the new delta trace between the last and the current stop.
1680 TP is the traced thread.
1681 May modify BTRACE as well as the existing trace in TP.
1682 Return 0 on success, -1 otherwise. */
1683
1684 static int
1685 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1686 {
1687 /* If we don't have trace, there's nothing to do. */
1688 if (btrace_data_empty (btrace))
1689 return 0;
1690
1691 switch (btrace->format)
1692 {
1693 case BTRACE_FORMAT_NONE:
1694 return 0;
1695
1696 case BTRACE_FORMAT_BTS:
1697 return btrace_stitch_bts (&btrace->variant.bts, tp);
1698
1699 case BTRACE_FORMAT_PT:
1700 /* Delta reads are not supported. */
1701 return -1;
1702 }
1703
1704 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1705 }
1706
1707 /* Clear the branch trace histories in BTINFO. */
1708
1709 static void
1710 btrace_clear_history (struct btrace_thread_info *btinfo)
1711 {
1712 xfree (btinfo->insn_history);
1713 xfree (btinfo->call_history);
1714 xfree (btinfo->replay);
1715
1716 btinfo->insn_history = NULL;
1717 btinfo->call_history = NULL;
1718 btinfo->replay = NULL;
1719 }
1720
1721 /* Clear the branch trace maintenance histories in BTINFO. */
1722
1723 static void
1724 btrace_maint_clear (struct btrace_thread_info *btinfo)
1725 {
1726 switch (btinfo->data.format)
1727 {
1728 default:
1729 break;
1730
1731 case BTRACE_FORMAT_BTS:
1732 btinfo->maint.variant.bts.packet_history.begin = 0;
1733 btinfo->maint.variant.bts.packet_history.end = 0;
1734 break;
1735
1736 #if defined (HAVE_LIBIPT)
1737 case BTRACE_FORMAT_PT:
1738 xfree (btinfo->maint.variant.pt.packets);
1739
1740 btinfo->maint.variant.pt.packets = NULL;
1741 btinfo->maint.variant.pt.packet_history.begin = 0;
1742 btinfo->maint.variant.pt.packet_history.end = 0;
1743 break;
1744 #endif /* defined (HAVE_LIBIPT) */
1745 }
1746 }
1747
1748 /* See btrace.h. */
1749
1750 const char *
1751 btrace_decode_error (enum btrace_format format, int errcode)
1752 {
1753 switch (format)
1754 {
1755 case BTRACE_FORMAT_BTS:
1756 switch (errcode)
1757 {
1758 case BDE_BTS_OVERFLOW:
1759 return _("instruction overflow");
1760
1761 case BDE_BTS_INSN_SIZE:
1762 return _("unknown instruction");
1763
1764 default:
1765 break;
1766 }
1767 break;
1768
1769 #if defined (HAVE_LIBIPT)
1770 case BTRACE_FORMAT_PT:
1771 switch (errcode)
1772 {
1773 case BDE_PT_USER_QUIT:
1774 return _("trace decode cancelled");
1775
1776 case BDE_PT_DISABLED:
1777 return _("disabled");
1778
1779 case BDE_PT_OVERFLOW:
1780 return _("overflow");
1781
1782 default:
1783 if (errcode < 0)
1784 return pt_errstr (pt_errcode (errcode));
1785 break;
1786 }
1787 break;
1788 #endif /* defined (HAVE_LIBIPT) */
1789
1790 default:
1791 break;
1792 }
1793
1794 return _("unknown");
1795 }
1796
1797 /* See btrace.h. */
1798
1799 void
1800 btrace_fetch (struct thread_info *tp)
1801 {
1802 struct btrace_thread_info *btinfo;
1803 struct btrace_target_info *tinfo;
1804 struct btrace_data btrace;
1805 struct cleanup *cleanup;
1806 int errcode;
1807
1808 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1809 target_pid_to_str (tp->ptid));
1810
1811 btinfo = &tp->btrace;
1812 tinfo = btinfo->target;
1813 if (tinfo == NULL)
1814 return;
1815
1816 /* There's no way we could get new trace while replaying.
1817 On the other hand, delta trace would return a partial record with the
1818 current PC, which is the replay PC, not the last PC, as expected. */
1819 if (btinfo->replay != NULL)
1820 return;
1821
1822 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1823 can store a gdb.Record object in Python referring to a different thread
1824 than the current one, temporarily set INFERIOR_PTID. */
1825 cleanup = save_inferior_ptid ();
1826 inferior_ptid = tp->ptid;
1827
1828 /* We should not be called on running or exited threads. */
1829 gdb_assert (can_access_registers_ptid (tp->ptid));
1830
1831 btrace_data_init (&btrace);
1832 make_cleanup_btrace_data (&btrace);
1833
1834 /* Let's first try to extend the trace we already have. */
1835 if (btinfo->end != NULL)
1836 {
1837 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1838 if (errcode == 0)
1839 {
1840 /* Success. Let's try to stitch the traces together. */
1841 errcode = btrace_stitch_trace (&btrace, tp);
1842 }
1843 else
1844 {
1845 /* We failed to read delta trace. Let's try to read new trace. */
1846 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1847
1848 /* If we got any new trace, discard what we have. */
1849 if (errcode == 0 && !btrace_data_empty (&btrace))
1850 btrace_clear (tp);
1851 }
1852
1853 /* If we were not able to read the trace, we start over. */
1854 if (errcode != 0)
1855 {
1856 btrace_clear (tp);
1857 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1858 }
1859 }
1860 else
1861 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1862
1863 /* If we were not able to read the branch trace, signal an error. */
1864 if (errcode != 0)
1865 error (_("Failed to read branch trace."));
1866
1867 /* Compute the trace, provided we have any. */
1868 if (!btrace_data_empty (&btrace))
1869 {
1870 /* Store the raw trace data. The stored data will be cleared in
1871 btrace_clear, so we always append the new trace. */
1872 btrace_data_append (&btinfo->data, &btrace);
1873 btrace_maint_clear (btinfo);
1874
1875 btrace_clear_history (btinfo);
1876 btrace_compute_ftrace (tp, &btrace);
1877 }
1878
1879 do_cleanups (cleanup);
1880 }
1881
1882 /* See btrace.h. */
1883
1884 void
1885 btrace_clear (struct thread_info *tp)
1886 {
1887 struct btrace_thread_info *btinfo;
1888
1889 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1890 target_pid_to_str (tp->ptid));
1891
1892 /* Make sure btrace frames that may hold a pointer into the branch
1893 trace data are destroyed. */
1894 reinit_frame_cache ();
1895
1896 btinfo = &tp->btrace;
1897 for (auto &bfun : btinfo->functions)
1898 {
1899 VEC_free (btrace_insn_s, bfun->insn);
1900 xfree (bfun);
1901 }
1902
1903 btinfo->functions.clear ();
1904 btinfo->begin = NULL;
1905 btinfo->end = NULL;
1906 btinfo->ngaps = 0;
1907
1908 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1909 btrace_maint_clear (btinfo);
1910 btrace_data_clear (&btinfo->data);
1911 btrace_clear_history (btinfo);
1912 }
1913
1914 /* See btrace.h. */
1915
1916 void
1917 btrace_free_objfile (struct objfile *objfile)
1918 {
1919 struct thread_info *tp;
1920
1921 DEBUG ("free objfile");
1922
1923 ALL_NON_EXITED_THREADS (tp)
1924 btrace_clear (tp);
1925 }
1926
1927 #if defined (HAVE_LIBEXPAT)
1928
1929 /* Check the btrace document version. */
1930
1931 static void
1932 check_xml_btrace_version (struct gdb_xml_parser *parser,
1933 const struct gdb_xml_element *element,
1934 void *user_data, VEC (gdb_xml_value_s) *attributes)
1935 {
1936 const char *version
1937 = (const char *) xml_find_attribute (attributes, "version")->value;
1938
1939 if (strcmp (version, "1.0") != 0)
1940 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1941 }
1942
1943 /* Parse a btrace "block" xml record. */
1944
1945 static void
1946 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1947 const struct gdb_xml_element *element,
1948 void *user_data, VEC (gdb_xml_value_s) *attributes)
1949 {
1950 struct btrace_data *btrace;
1951 struct btrace_block *block;
1952 ULONGEST *begin, *end;
1953
1954 btrace = (struct btrace_data *) user_data;
1955
1956 switch (btrace->format)
1957 {
1958 case BTRACE_FORMAT_BTS:
1959 break;
1960
1961 case BTRACE_FORMAT_NONE:
1962 btrace->format = BTRACE_FORMAT_BTS;
1963 btrace->variant.bts.blocks = NULL;
1964 break;
1965
1966 default:
1967 gdb_xml_error (parser, _("Btrace format error."));
1968 }
1969
1970 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1971 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1972
1973 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1974 block->begin = *begin;
1975 block->end = *end;
1976 }
1977
1978 /* Parse a "raw" xml record. */
1979
1980 static void
1981 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1982 gdb_byte **pdata, size_t *psize)
1983 {
1984 struct cleanup *cleanup;
1985 gdb_byte *data, *bin;
1986 size_t len, size;
1987
1988 len = strlen (body_text);
1989 if (len % 2 != 0)
1990 gdb_xml_error (parser, _("Bad raw data size."));
1991
1992 size = len / 2;
1993
1994 bin = data = (gdb_byte *) xmalloc (size);
1995 cleanup = make_cleanup (xfree, data);
1996
1997 /* We use hex encoding - see common/rsp-low.h. */
1998 while (len > 0)
1999 {
2000 char hi, lo;
2001
2002 hi = *body_text++;
2003 lo = *body_text++;
2004
2005 if (hi == 0 || lo == 0)
2006 gdb_xml_error (parser, _("Bad hex encoding."));
2007
2008 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2009 len -= 2;
2010 }
2011
2012 discard_cleanups (cleanup);
2013
2014 *pdata = data;
2015 *psize = size;
2016 }
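
/* For example, the body text "0123ab" would decode into the three bytes
   0x01, 0x23 and 0xab: each byte is encoded as two hex digits with the
   high nibble first, matching the fromhex conversion above.  */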
2017
2018 /* Parse a btrace pt-config "cpu" xml record. */
2019
2020 static void
2021 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2022 const struct gdb_xml_element *element,
2023 void *user_data,
2024 VEC (gdb_xml_value_s) *attributes)
2025 {
2026 struct btrace_data *btrace;
2027 const char *vendor;
2028 ULONGEST *family, *model, *stepping;
2029
2030 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2031 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2032 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2033 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2034
2035 btrace = (struct btrace_data *) user_data;
2036
2037 if (strcmp (vendor, "GenuineIntel") == 0)
2038 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2039
2040 btrace->variant.pt.config.cpu.family = *family;
2041 btrace->variant.pt.config.cpu.model = *model;
2042 btrace->variant.pt.config.cpu.stepping = *stepping;
2043 }
2044
2045 /* Parse a btrace pt "raw" xml record. */
2046
2047 static void
2048 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2049 const struct gdb_xml_element *element,
2050 void *user_data, const char *body_text)
2051 {
2052 struct btrace_data *btrace;
2053
2054 btrace = (struct btrace_data *) user_data;
2055 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2056 &btrace->variant.pt.size);
2057 }
2058
2059 /* Parse a btrace "pt" xml record. */
2060
2061 static void
2062 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2063 const struct gdb_xml_element *element,
2064 void *user_data, VEC (gdb_xml_value_s) *attributes)
2065 {
2066 struct btrace_data *btrace;
2067
2068 btrace = (struct btrace_data *) user_data;
2069 btrace->format = BTRACE_FORMAT_PT;
2070 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2071 btrace->variant.pt.data = NULL;
2072 btrace->variant.pt.size = 0;
2073 }
2074
2075 static const struct gdb_xml_attribute block_attributes[] = {
2076 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2077 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2078 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2079 };
2080
2081 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2082 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2083 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2084 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2085 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2086 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2087 };
2088
2089 static const struct gdb_xml_element btrace_pt_config_children[] = {
2090 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2091 parse_xml_btrace_pt_config_cpu, NULL },
2092 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2093 };
2094
2095 static const struct gdb_xml_element btrace_pt_children[] = {
2096 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2097 NULL },
2098 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2099 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2100 };
2101
2102 static const struct gdb_xml_attribute btrace_attributes[] = {
2103 { "version", GDB_XML_AF_NONE, NULL, NULL },
2104 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2105 };
2106
2107 static const struct gdb_xml_element btrace_children[] = {
2108 { "block", block_attributes, NULL,
2109 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2110 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2111 NULL },
2112 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2113 };
2114
2115 static const struct gdb_xml_element btrace_elements[] = {
2116 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2117 check_xml_btrace_version, NULL },
2118 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2119 };
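
/* A sketch of a document matching the above bindings; the addresses, cpu
   identification and raw payload are made-up values, for illustration only:

     <btrace version="1.0">
       <block begin="0x400500" end="0x40052a"/>
       <block begin="0x400537" end="0x400550"/>
     </btrace>

   or, for Intel Processor Trace, with the raw trace hex-encoded as in
   parse_xml_raw above:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="62" stepping="4"/>
         </pt-config>
         <raw>028283</raw>
       </pt>
     </btrace>  */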
2120
2121 #endif /* defined (HAVE_LIBEXPAT) */
2122
2123 /* See btrace.h. */
2124
2125 void
2126 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2127 {
2128 struct cleanup *cleanup;
2129 int errcode;
2130
2131 #if defined (HAVE_LIBEXPAT)
2132
2133 btrace->format = BTRACE_FORMAT_NONE;
2134
2135 cleanup = make_cleanup_btrace_data (btrace);
2136 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2137 buffer, btrace);
2138 if (errcode != 0)
2139 error (_("Error parsing branch trace."));
2140
2141 /* Keep parse results. */
2142 discard_cleanups (cleanup);
2143
2144 #else /* !defined (HAVE_LIBEXPAT) */
2145
2146 error (_("Cannot process branch trace. XML parsing is not supported."));
2147
2148 #endif /* !defined (HAVE_LIBEXPAT) */
2149 }
2150
2151 #if defined (HAVE_LIBEXPAT)
2152
2153 /* Parse a btrace-conf "bts" xml record. */
2154
2155 static void
2156 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2157 const struct gdb_xml_element *element,
2158 void *user_data, VEC (gdb_xml_value_s) *attributes)
2159 {
2160 struct btrace_config *conf;
2161 struct gdb_xml_value *size;
2162
2163 conf = (struct btrace_config *) user_data;
2164 conf->format = BTRACE_FORMAT_BTS;
2165 conf->bts.size = 0;
2166
2167 size = xml_find_attribute (attributes, "size");
2168 if (size != NULL)
2169 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2170 }
2171
2172 /* Parse a btrace-conf "pt" xml record. */
2173
2174 static void
2175 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2176 const struct gdb_xml_element *element,
2177 void *user_data, VEC (gdb_xml_value_s) *attributes)
2178 {
2179 struct btrace_config *conf;
2180 struct gdb_xml_value *size;
2181
2182 conf = (struct btrace_config *) user_data;
2183 conf->format = BTRACE_FORMAT_PT;
2184 conf->pt.size = 0;
2185
2186 size = xml_find_attribute (attributes, "size");
2187 if (size != NULL)
2188 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2189 }
2190
2191 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2192 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2193 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2194 };
2195
2196 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2197 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2198 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2199 };
2200
2201 static const struct gdb_xml_element btrace_conf_children[] = {
2202 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2203 parse_xml_btrace_conf_bts, NULL },
2204 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2205 parse_xml_btrace_conf_pt, NULL },
2206 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2207 };
2208
2209 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2210 { "version", GDB_XML_AF_NONE, NULL, NULL },
2211 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2212 };
2213
2214 static const struct gdb_xml_element btrace_conf_elements[] = {
2215 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2216 GDB_XML_EF_NONE, NULL, NULL },
2217 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2218 };
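
/* A sketch of a configuration document matching the above bindings; the
   buffer size is a made-up value, for illustration only:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */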
2219
2220 #endif /* defined (HAVE_LIBEXPAT) */
2221
2222 /* See btrace.h. */
2223
2224 void
2225 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2226 {
2227 int errcode;
2228
2229 #if defined (HAVE_LIBEXPAT)
2230
2231 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2232 btrace_conf_elements, xml, conf);
2233 if (errcode != 0)
2234 error (_("Error parsing branch trace configuration."));
2235
2236 #else /* !defined (HAVE_LIBEXPAT) */
2237
2238 error (_("XML parsing is not supported."));
2239
2240 #endif /* !defined (HAVE_LIBEXPAT) */
2241 }
2242
2243 /* See btrace.h. */
2244
2245 const struct btrace_insn *
2246 btrace_insn_get (const struct btrace_insn_iterator *it)
2247 {
2248 const struct btrace_function *bfun;
2249 unsigned int index, end;
2250
2251 index = it->index;
2252 bfun = it->function;
2253
2254 /* Check if the iterator points to a gap in the trace. */
2255 if (bfun->errcode != 0)
2256 return NULL;
2257
2258 /* The index is within the bounds of this function's instruction vector. */
2259 end = VEC_length (btrace_insn_s, bfun->insn);
2260 gdb_assert (0 < end);
2261 gdb_assert (index < end);
2262
2263 return VEC_index (btrace_insn_s, bfun->insn, index);
2264 }
2265
2266 /* See btrace.h. */
2267
2268 int
2269 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2270 {
2271 return it->function->errcode;
2272 }
2273
2274 /* See btrace.h. */
2275
2276 unsigned int
2277 btrace_insn_number (const struct btrace_insn_iterator *it)
2278 {
2279 return it->function->insn_offset + it->index;
2280 }
2281
2282 /* See btrace.h. */
2283
2284 void
2285 btrace_insn_begin (struct btrace_insn_iterator *it,
2286 const struct btrace_thread_info *btinfo)
2287 {
2288 const struct btrace_function *bfun;
2289
2290 bfun = btinfo->begin;
2291 if (bfun == NULL)
2292 error (_("No trace."));
2293
2294 it->function = bfun;
2295 it->index = 0;
2296 }
2297
2298 /* See btrace.h. */
2299
2300 void
2301 btrace_insn_end (struct btrace_insn_iterator *it,
2302 const struct btrace_thread_info *btinfo)
2303 {
2304 const struct btrace_function *bfun;
2305 unsigned int length;
2306
2307 bfun = btinfo->end;
2308 if (bfun == NULL)
2309 error (_("No trace."));
2310
2311 length = VEC_length (btrace_insn_s, bfun->insn);
2312
2311   /* The last function may either be a gap or contain the current
2312      instruction, which is one past the end of the execution trace; ignore
2313      it.  */
2316 if (length > 0)
2317 length -= 1;
2318
2319 it->function = bfun;
2320 it->index = length;
2321 }
2322
2323 /* See btrace.h. */
2324
2325 unsigned int
2326 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2327 {
2328 const struct btrace_function *bfun;
2329 unsigned int index, steps;
2330
2331 bfun = it->function;
2332 steps = 0;
2333 index = it->index;
2334
2335 while (stride != 0)
2336 {
2337 unsigned int end, space, adv;
2338
2339 end = VEC_length (btrace_insn_s, bfun->insn);
2340
2341 /* An empty function segment represents a gap in the trace. We count
2342 it as one instruction. */
2343 if (end == 0)
2344 {
2345 const struct btrace_function *next;
2346
2347 next = bfun->flow.next;
2348 if (next == NULL)
2349 break;
2350
2351 stride -= 1;
2352 steps += 1;
2353
2354 bfun = next;
2355 index = 0;
2356
2357 continue;
2358 }
2359
2360 gdb_assert (0 < end);
2361 gdb_assert (index < end);
2362
2363 /* Compute the number of instructions remaining in this segment. */
2364 space = end - index;
2365
2366 /* Advance the iterator as far as possible within this segment. */
2367 adv = std::min (space, stride);
2368 stride -= adv;
2369 index += adv;
2370 steps += adv;
2371
2372 /* Move to the next function if we're at the end of this one. */
2373 if (index == end)
2374 {
2375 const struct btrace_function *next;
2376
2377 next = bfun->flow.next;
2378 if (next == NULL)
2379 {
2380 	      /* We stepped past the last function.
2381
2382 		 Let's adjust the index to point back to the last instruction
2383 		 of this last function.  */
2384 index -= 1;
2385 steps -= 1;
2386 break;
2387 }
2388
2389 /* We now point to the first instruction in the new function. */
2390 bfun = next;
2391 index = 0;
2392 }
2393
2394 /* We did make progress. */
2395 gdb_assert (adv > 0);
2396 }
2397
2398 /* Update the iterator. */
2399 it->function = bfun;
2400 it->index = index;
2401
2402 return steps;
2403 }
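
/* A usage sketch for the instruction iterators, assuming BTINFO refers to
   a thread's fetched branch trace and process_insn is a hypothetical
   consumer.  A gap yields a NULL instruction; its error code would be
   available via btrace_insn_get_error:

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, btinfo);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           process_insn (insn);
       }
     while (btrace_insn_next (&it, 1) != 0);  */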
2404
2405 /* See btrace.h. */
2406
2407 unsigned int
2408 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2409 {
2410 const struct btrace_function *bfun;
2411 unsigned int index, steps;
2412
2413 bfun = it->function;
2414 steps = 0;
2415 index = it->index;
2416
2417 while (stride != 0)
2418 {
2419 unsigned int adv;
2420
2421 /* Move to the previous function if we're at the start of this one. */
2422 if (index == 0)
2423 {
2424 const struct btrace_function *prev;
2425
2426 prev = bfun->flow.prev;
2427 if (prev == NULL)
2428 break;
2429
2430 /* We point to one after the last instruction in the new function. */
2431 bfun = prev;
2432 index = VEC_length (btrace_insn_s, bfun->insn);
2433
2434 /* An empty function segment represents a gap in the trace. We count
2435 it as one instruction. */
2436 if (index == 0)
2437 {
2438 stride -= 1;
2439 steps += 1;
2440
2441 continue;
2442 }
2443 }
2444
2445 /* Advance the iterator as far as possible within this segment. */
2446 adv = std::min (index, stride);
2447
2448 stride -= adv;
2449 index -= adv;
2450 steps += adv;
2451
2452 /* We did make progress. */
2453 gdb_assert (adv > 0);
2454 }
2455
2456 /* Update the iterator. */
2457 it->function = bfun;
2458 it->index = index;
2459
2460 return steps;
2461 }
2462
2463 /* See btrace.h. */
2464
2465 int
2466 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2467 const struct btrace_insn_iterator *rhs)
2468 {
2469 unsigned int lnum, rnum;
2470
2471 lnum = btrace_insn_number (lhs);
2472 rnum = btrace_insn_number (rhs);
2473
2474 return (int) (lnum - rnum);
2475 }
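
/* For example, comparing an iterator at instruction number 5 with one at
   number 7 computes 5 - 7 in unsigned arithmetic; the cast back to int
   yields -2 on the usual two's-complement targets, provided the two
   numbers are less than INT_MAX apart.  */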
2476
2477 /* See btrace.h. */
2478
2479 int
2480 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2481 const struct btrace_thread_info *btinfo,
2482 unsigned int number)
2483 {
2484 const struct btrace_function *bfun;
2485 unsigned int upper, lower;
2486
2487 if (btinfo->functions.empty ())
2488 return 0;
2489
2490 lower = 0;
2491 bfun = btinfo->functions[lower];
2492 if (number < bfun->insn_offset)
2493 return 0;
2494
2495 upper = btinfo->functions.size () - 1;
2496 bfun = btinfo->functions[upper];
2497 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2498 return 0;
2499
2500 /* We assume that there are no holes in the numbering. */
2501 for (;;)
2502 {
2503 const unsigned int average = lower + (upper - lower) / 2;
2504
2505 bfun = btinfo->functions[average];
2506
2507 if (number < bfun->insn_offset)
2508 {
2509 upper = average - 1;
2510 continue;
2511 }
2512
2513 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2514 {
2515 lower = average + 1;
2516 continue;
2517 }
2518
2519 break;
2520 }
2521
2522 it->function = bfun;
2523 it->index = number - bfun->insn_offset;
2524 return 1;
2525 }
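
/* A worked example with made-up numbers: assume three function segments
   with insn_offset 1, 4 and 9, containing 3, 5 and 2 instructions,
   respectively.  Looking up NUMBER == 6 selects the middle segment, since
   4 <= 6 < 4 + 5, and sets the iterator's index to 6 - 4 == 2.  */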
2526
2527 /* See btrace.h. */
2528
2529 const struct btrace_function *
2530 btrace_call_get (const struct btrace_call_iterator *it)
2531 {
2532 return it->function;
2533 }
2534
2535 /* See btrace.h. */
2536
2537 unsigned int
2538 btrace_call_number (const struct btrace_call_iterator *it)
2539 {
2540 const struct btrace_thread_info *btinfo;
2541 const struct btrace_function *bfun;
2542 unsigned int insns;
2543
2544 btinfo = it->btinfo;
2545 bfun = it->function;
2546 if (bfun != NULL)
2547 return bfun->number;
2548
2549 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2550 number of the last function. */
2551 bfun = btinfo->end;
2552 insns = VEC_length (btrace_insn_s, bfun->insn);
2553
2554 /* If the function contains only a single instruction (i.e. the current
2555 instruction), it will be skipped and its number is already the number
2556 we seek. */
2557 if (insns == 1)
2558 return bfun->number;
2559
2560 /* Otherwise, return one more than the number of the last function. */
2561 return bfun->number + 1;
2562 }
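
/* For example (numbers are illustrative): if the trace ends in function
   segment number 42 and that segment contains only the current
   instruction, the end iterator reports call number 42; if the segment
   contains further instructions, it reports 43.  */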
2563
2564 /* See btrace.h. */
2565
2566 void
2567 btrace_call_begin (struct btrace_call_iterator *it,
2568 const struct btrace_thread_info *btinfo)
2569 {
2570 const struct btrace_function *bfun;
2571
2572 bfun = btinfo->begin;
2573 if (bfun == NULL)
2574 error (_("No trace."));
2575
2576 it->btinfo = btinfo;
2577 it->function = bfun;
2578 }
2579
2580 /* See btrace.h. */
2581
2582 void
2583 btrace_call_end (struct btrace_call_iterator *it,
2584 const struct btrace_thread_info *btinfo)
2585 {
2586 const struct btrace_function *bfun;
2587
2588 bfun = btinfo->end;
2589 if (bfun == NULL)
2590 error (_("No trace."));
2591
2592 it->btinfo = btinfo;
2593 it->function = NULL;
2594 }
2595
2596 /* See btrace.h. */
2597
2598 unsigned int
2599 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2600 {
2601 const struct btrace_function *bfun;
2602 unsigned int steps;
2603
2604 bfun = it->function;
2605 steps = 0;
2606 while (bfun != NULL)
2607 {
2608 const struct btrace_function *next;
2609 unsigned int insns;
2610
2611 next = bfun->flow.next;
2612 if (next == NULL)
2613 {
2614 /* Ignore the last function if it only contains a single
2615 (i.e. the current) instruction. */
2616 insns = VEC_length (btrace_insn_s, bfun->insn);
2617 if (insns == 1)
2618 steps -= 1;
2619 }
2620
2621 if (stride == steps)
2622 break;
2623
2624 bfun = next;
2625 steps += 1;
2626 }
2627
2628 it->function = bfun;
2629 return steps;
2630 }
2631
2632 /* See btrace.h. */
2633
2634 unsigned int
2635 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2636 {
2637 const struct btrace_thread_info *btinfo;
2638 const struct btrace_function *bfun;
2639 unsigned int steps;
2640
2641 bfun = it->function;
2642 steps = 0;
2643
2644 if (bfun == NULL)
2645 {
2646 unsigned int insns;
2647
2648 btinfo = it->btinfo;
2649 bfun = btinfo->end;
2650 if (bfun == NULL)
2651 return 0;
2652
2653 /* Ignore the last function if it only contains a single
2654 (i.e. the current) instruction. */
2655 insns = VEC_length (btrace_insn_s, bfun->insn);
2656 if (insns == 1)
2657 bfun = bfun->flow.prev;
2658
2659 if (bfun == NULL)
2660 return 0;
2661
2662 steps += 1;
2663 }
2664
2665 while (steps < stride)
2666 {
2667 const struct btrace_function *prev;
2668
2669 prev = bfun->flow.prev;
2670 if (prev == NULL)
2671 break;
2672
2673 bfun = prev;
2674 steps += 1;
2675 }
2676
2677 it->function = bfun;
2678 return steps;
2679 }
2680
2681 /* See btrace.h. */
2682
2683 int
2684 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2685 const struct btrace_call_iterator *rhs)
2686 {
2687 unsigned int lnum, rnum;
2688
2689 lnum = btrace_call_number (lhs);
2690 rnum = btrace_call_number (rhs);
2691
2692 return (int) (lnum - rnum);
2693 }
2694
2695 /* See btrace.h. */
2696
2697 int
2698 btrace_find_call_by_number (struct btrace_call_iterator *it,
2699 const struct btrace_thread_info *btinfo,
2700 unsigned int number)
2701 {
2702 const struct btrace_function *bfun;
2703
2704 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2705 {
2706 unsigned int bnum;
2707
2708 bnum = bfun->number;
2709 if (number == bnum)
2710 {
2711 it->btinfo = btinfo;
2712 it->function = bfun;
2713 return 1;
2714 }
2715
2716       /* Functions are ordered and numbered consecutively, so we could bail
2717 	 out earlier.  On the other hand, it is very unlikely that we search
2718 	 for a nonexistent function.  */
2719 }
2720
2721 return 0;
2722 }
2723
2724 /* See btrace.h. */
2725
2726 void
2727 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2728 const struct btrace_insn_iterator *begin,
2729 const struct btrace_insn_iterator *end)
2730 {
2731 if (btinfo->insn_history == NULL)
2732 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2733
2734 btinfo->insn_history->begin = *begin;
2735 btinfo->insn_history->end = *end;
2736 }
2737
2738 /* See btrace.h. */
2739
2740 void
2741 btrace_set_call_history (struct btrace_thread_info *btinfo,
2742 const struct btrace_call_iterator *begin,
2743 const struct btrace_call_iterator *end)
2744 {
2745 gdb_assert (begin->btinfo == end->btinfo);
2746
2747 if (btinfo->call_history == NULL)
2748 btinfo->call_history = XCNEW (struct btrace_call_history);
2749
2750 btinfo->call_history->begin = *begin;
2751 btinfo->call_history->end = *end;
2752 }
2753
2754 /* See btrace.h. */
2755
2756 int
2757 btrace_is_replaying (struct thread_info *tp)
2758 {
2759 return tp->btrace.replay != NULL;
2760 }
2761
2762 /* See btrace.h. */
2763
2764 int
2765 btrace_is_empty (struct thread_info *tp)
2766 {
2767 struct btrace_insn_iterator begin, end;
2768 struct btrace_thread_info *btinfo;
2769
2770 btinfo = &tp->btrace;
2771
2772 if (btinfo->begin == NULL)
2773 return 1;
2774
2775 btrace_insn_begin (&begin, btinfo);
2776 btrace_insn_end (&end, btinfo);
2777
2778 return btrace_insn_cmp (&begin, &end) == 0;
2779 }
2780
2781 /* Forward the cleanup request. */
2782
2783 static void
2784 do_btrace_data_cleanup (void *arg)
2785 {
2786 btrace_data_fini ((struct btrace_data *) arg);
2787 }
2788
2789 /* See btrace.h. */
2790
2791 struct cleanup *
2792 make_cleanup_btrace_data (struct btrace_data *data)
2793 {
2794 return make_cleanup (do_btrace_data_cleanup, data);
2795 }
2796
2797 #if defined (HAVE_LIBIPT)
2798
2799 /* Print a single packet. */
2800
2801 static void
2802 pt_print_packet (const struct pt_packet *packet)
2803 {
2804 switch (packet->type)
2805 {
2806 default:
2807 printf_unfiltered (("[??: %x]"), packet->type);
2808 break;
2809
2810 case ppt_psb:
2811 printf_unfiltered (("psb"));
2812 break;
2813
2814 case ppt_psbend:
2815 printf_unfiltered (("psbend"));
2816 break;
2817
2818 case ppt_pad:
2819 printf_unfiltered (("pad"));
2820 break;
2821
2822 case ppt_tip:
2823 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2824 packet->payload.ip.ipc,
2825 packet->payload.ip.ip);
2826 break;
2827
2828 case ppt_tip_pge:
2829 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2830 packet->payload.ip.ipc,
2831 packet->payload.ip.ip);
2832 break;
2833
2834 case ppt_tip_pgd:
2835 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2836 packet->payload.ip.ipc,
2837 packet->payload.ip.ip);
2838 break;
2839
2840 case ppt_fup:
2841 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2842 packet->payload.ip.ipc,
2843 packet->payload.ip.ip);
2844 break;
2845
2846 case ppt_tnt_8:
2847 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2848 packet->payload.tnt.bit_size,
2849 packet->payload.tnt.payload);
2850 break;
2851
2852 case ppt_tnt_64:
2853 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2854 packet->payload.tnt.bit_size,
2855 packet->payload.tnt.payload);
2856 break;
2857
2858 case ppt_pip:
2859 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2860 packet->payload.pip.nr ? (" nr") : (""));
2861 break;
2862
2863 case ppt_tsc:
2864 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2865 break;
2866
2867 case ppt_cbr:
2868 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2869 break;
2870
2871 case ppt_mode:
2872 switch (packet->payload.mode.leaf)
2873 {
2874 default:
2875 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2876 break;
2877
2878 case pt_mol_exec:
2879 printf_unfiltered (("mode.exec%s%s"),
2880 packet->payload.mode.bits.exec.csl
2881 ? (" cs.l") : (""),
2882 packet->payload.mode.bits.exec.csd
2883 ? (" cs.d") : (""));
2884 break;
2885
2886 case pt_mol_tsx:
2887 printf_unfiltered (("mode.tsx%s%s"),
2888 packet->payload.mode.bits.tsx.intx
2889 ? (" intx") : (""),
2890 packet->payload.mode.bits.tsx.abrt
2891 ? (" abrt") : (""));
2892 break;
2893 }
2894 break;
2895
2896 case ppt_ovf:
2897 printf_unfiltered (("ovf"));
2898 break;
2899
2900 case ppt_stop:
2901 printf_unfiltered (("stop"));
2902 break;
2903
2904 case ppt_vmcs:
2905 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2906 break;
2907
2908 case ppt_tma:
2909 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2910 packet->payload.tma.fc);
2911 break;
2912
2913 case ppt_mtc:
2914 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2915 break;
2916
2917 case ppt_cyc:
2918 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2919 break;
2920
2921 case ppt_mnt:
2922 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2923 break;
2924 }
2925 }
2926
2927 /* Decode packets into MAINT using DECODER. */
2928
2929 static void
2930 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2931 struct pt_packet_decoder *decoder)
2932 {
2933 int errcode;
2934
2935 for (;;)
2936 {
2937 struct btrace_pt_packet packet;
2938
2939 errcode = pt_pkt_sync_forward (decoder);
2940 if (errcode < 0)
2941 break;
2942
2943 for (;;)
2944 {
2945 pt_pkt_get_offset (decoder, &packet.offset);
2946
2947 errcode = pt_pkt_next (decoder, &packet.packet,
2948 				 sizeof (packet.packet));
2949 if (errcode < 0)
2950 break;
2951
2952 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2953 {
2954 packet.errcode = pt_errcode (errcode);
2955 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2956 &packet);
2957 }
2958 }
2959
2960 if (errcode == -pte_eos)
2961 break;
2962
2963 packet.errcode = pt_errcode (errcode);
2964 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2965 &packet);
2966
2967 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2968 packet.offset, pt_errstr (packet.errcode));
2969 }
2970
2971 if (errcode != -pte_eos)
2972 warning (_("Failed to synchronize onto the Intel Processor Trace "
2973 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2974 }
2975
2976 /* Update the packet history in BTINFO. */
2977
2978 static void
2979 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2980 {
2982 struct pt_packet_decoder *decoder;
2983 struct btrace_data_pt *pt;
2984 struct pt_config config;
2985 int errcode;
2986
2987 pt = &btinfo->data.variant.pt;
2988
2989 /* Nothing to do if there is no trace. */
2990 if (pt->size == 0)
2991 return;
2992
2993   memset (&config, 0, sizeof (config));
2994
2995 config.size = sizeof (config);
2996 config.begin = pt->data;
2997 config.end = pt->data + pt->size;
2998
2999 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
3000 config.cpu.family = pt->config.cpu.family;
3001 config.cpu.model = pt->config.cpu.model;
3002 config.cpu.stepping = pt->config.cpu.stepping;
3003
3004 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3005 if (errcode < 0)
3006 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
3007 pt_errstr (pt_errcode (errcode)));
3008
3009 decoder = pt_pkt_alloc_decoder (&config);
3010 if (decoder == NULL)
3011 error (_("Failed to allocate the Intel Processor Trace decoder."));
3012
3013 TRY
3014 {
3015 btrace_maint_decode_pt (&btinfo->maint, decoder);
3016 }
3017 CATCH (except, RETURN_MASK_ALL)
3018 {
3019 pt_pkt_free_decoder (decoder);
3020
3021 if (except.reason < 0)
3022 throw_exception (except);
3023 }
3024 END_CATCH
3025
3026 pt_pkt_free_decoder (decoder);
3027 }
3028
3029 #endif /* defined (HAVE_LIBIPT) */
3030
3031 /* Update the packet maintenance information for BTINFO and store the
3032 low and high bounds into BEGIN and END, respectively.
3033 Store the current iterator state into FROM and TO. */
3034
3035 static void
3036 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3037 unsigned int *begin, unsigned int *end,
3038 unsigned int *from, unsigned int *to)
3039 {
3040 switch (btinfo->data.format)
3041 {
3042 default:
3043 *begin = 0;
3044 *end = 0;
3045 *from = 0;
3046 *to = 0;
3047 break;
3048
3049 case BTRACE_FORMAT_BTS:
3050 /* Nothing to do - we operate directly on BTINFO->DATA. */
3051 *begin = 0;
3052 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3053 *from = btinfo->maint.variant.bts.packet_history.begin;
3054 *to = btinfo->maint.variant.bts.packet_history.end;
3055 break;
3056
3057 #if defined (HAVE_LIBIPT)
3058 case BTRACE_FORMAT_PT:
3059 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3060 btrace_maint_update_pt_packets (btinfo);
3061
3062 *begin = 0;
3063 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3064 *from = btinfo->maint.variant.pt.packet_history.begin;
3065 *to = btinfo->maint.variant.pt.packet_history.end;
3066 break;
3067 #endif /* defined (HAVE_LIBIPT) */
3068 }
3069 }
3070
3071 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3072 update the current iterator position. */
3073
3074 static void
3075 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3076 unsigned int begin, unsigned int end)
3077 {
3078 switch (btinfo->data.format)
3079 {
3080 default:
3081 break;
3082
3083 case BTRACE_FORMAT_BTS:
3084 {
3085 VEC (btrace_block_s) *blocks;
3086 unsigned int blk;
3087
3088 blocks = btinfo->data.variant.bts.blocks;
3089 for (blk = begin; blk < end; ++blk)
3090 {
3091 const btrace_block_s *block;
3092
3093 block = VEC_index (btrace_block_s, blocks, blk);
3094
3095 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3096 core_addr_to_string_nz (block->begin),
3097 core_addr_to_string_nz (block->end));
3098 }
3099
3100 btinfo->maint.variant.bts.packet_history.begin = begin;
3101 btinfo->maint.variant.bts.packet_history.end = end;
3102 }
3103 break;
3104
3105 #if defined (HAVE_LIBIPT)
3106 case BTRACE_FORMAT_PT:
3107 {
3108 VEC (btrace_pt_packet_s) *packets;
3109 unsigned int pkt;
3110
3111 packets = btinfo->maint.variant.pt.packets;
3112 for (pkt = begin; pkt < end; ++pkt)
3113 {
3114 const struct btrace_pt_packet *packet;
3115
3116 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3117
3118 printf_unfiltered ("%u\t", pkt);
3119 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3120
3121 if (packet->errcode == pte_ok)
3122 pt_print_packet (&packet->packet);
3123 else
3124 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3125
3126 printf_unfiltered ("\n");
3127 }
3128
3129 btinfo->maint.variant.pt.packet_history.begin = begin;
3130 btinfo->maint.variant.pt.packet_history.end = end;
3131 }
3132 break;
3133 #endif /* defined (HAVE_LIBIPT) */
3134 }
3135 }
3136
3137 /* Read a number from an argument string. */
3138
3139 static unsigned int
3140 get_uint (char **arg)
3141 {
3142 char *begin, *end, *pos;
3143 unsigned long number;
3144
3145 begin = *arg;
3146 pos = skip_spaces (begin);
3147
3148 if (!isdigit (*pos))
3149 error (_("Expected positive number, got: %s."), pos);
3150
3151 number = strtoul (pos, &end, 10);
3152 if (number > UINT_MAX)
3153 error (_("Number too big."));
3154
3155 *arg += (end - begin);
3156
3157 return (unsigned int) number;
3158 }
3159
3160 /* Read a context size from an argument string. */
3161
3162 static int
3163 get_context_size (char **arg)
3164 {
3165 char *pos;
3166 int number;
3167
3168 pos = skip_spaces (*arg);
3169
3170 if (!isdigit (*pos))
3171 error (_("Expected positive number, got: %s."), pos);
3172
3173 return strtol (pos, arg, 10);
3174 }
3175
3176 /* Complain about junk at the end of an argument string. */
3177
3178 static void
3179 no_chunk (char *arg)
3180 {
3181 if (*arg != 0)
3182 error (_("Junk after argument: %s."), arg);
3183 }
3184
3185 /* The "maintenance btrace packet-history" command. */
3186
3187 static void
3188 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3189 {
3190 struct btrace_thread_info *btinfo;
3191 struct thread_info *tp;
3192 unsigned int size, begin, end, from, to;
3193
3194 tp = find_thread_ptid (inferior_ptid);
3195 if (tp == NULL)
3196 error (_("No thread."));
3197
3198 size = 10;
3199 btinfo = &tp->btrace;
3200
3201 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3202 if (begin == end)
3203 {
3204 printf_unfiltered (_("No trace.\n"));
3205 return;
3206 }
3207
3208 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3209 {
3210 from = to;
3211
3212 if (end - from < size)
3213 size = end - from;
3214 to = from + size;
3215 }
3216 else if (strcmp (arg, "-") == 0)
3217 {
3218 to = from;
3219
3220 if (to - begin < size)
3221 size = to - begin;
3222 from = to - size;
3223 }
3224 else
3225 {
3226 from = get_uint (&arg);
3227 if (end <= from)
3228 error (_("'%u' is out of range."), from);
3229
3230 arg = skip_spaces (arg);
3231 if (*arg == ',')
3232 {
3233 arg = skip_spaces (++arg);
3234
3235 if (*arg == '+')
3236 {
3237 arg += 1;
3238 size = get_context_size (&arg);
3239
3240 no_chunk (arg);
3241
3242 if (end - from < size)
3243 size = end - from;
3244 to = from + size;
3245 }
3246 else if (*arg == '-')
3247 {
3248 arg += 1;
3249 size = get_context_size (&arg);
3250
3251 no_chunk (arg);
3252
3253 	      /* Include the packet given as the first argument.  */
3254 from += 1;
3255 to = from;
3256
3257 if (to - begin < size)
3258 size = to - begin;
3259 from = to - size;
3260 }
3261 else
3262 {
3263 to = get_uint (&arg);
3264
3265 /* Include the packet at the second argument and silently
3266 truncate the range. */
3267 if (to < end)
3268 to += 1;
3269 else
3270 to = end;
3271
3272 no_chunk (arg);
3273 }
3274 }
3275 else
3276 {
3277 no_chunk (arg);
3278
3279 if (end - from < size)
3280 size = end - from;
3281 to = from + size;
3282 }
3283
3284 dont_repeat ();
3285 }
3286
3287 btrace_maint_print_packets (btinfo, from, to);
3288 }
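
/* Example invocations of the above command, assuming a sufficiently large
   packet history; the packet numbers are illustrative:

     maint btrace packet-history          - the next ten packets
     maint btrace packet-history -        - the preceding ten packets
     maint btrace packet-history 42       - ten packets starting at 42
     maint btrace packet-history 42,50    - packets 42 through 50
     maint btrace packet-history 42,+20   - twenty packets starting at 42
     maint btrace packet-history 42,-20   - twenty packets up to and
                                            including 42  */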
3289
3290 /* The "maintenance btrace clear-packet-history" command. */
3291
3292 static void
3293 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3294 {
3295 struct btrace_thread_info *btinfo;
3296 struct thread_info *tp;
3297
3298 if (args != NULL && *args != 0)
3299 error (_("Invalid argument."));
3300
3301 tp = find_thread_ptid (inferior_ptid);
3302 if (tp == NULL)
3303 error (_("No thread."));
3304
3305 btinfo = &tp->btrace;
3306
3307   /* Must clear the maint data before clearing BTINFO->DATA - the maint data depends on it.  */
3308 btrace_maint_clear (btinfo);
3309 btrace_data_clear (&btinfo->data);
3310 }
3311
3312 /* The "maintenance btrace clear" command. */
3313
3314 static void
3315 maint_btrace_clear_cmd (char *args, int from_tty)
3316 {
3317 struct btrace_thread_info *btinfo;
3318 struct thread_info *tp;
3319
3320 if (args != NULL && *args != 0)
3321 error (_("Invalid argument."));
3322
3323 tp = find_thread_ptid (inferior_ptid);
3324 if (tp == NULL)
3325 error (_("No thread."));
3326
3327 btrace_clear (tp);
3328 }
3329
3330 /* The "maintenance btrace" command. */
3331
3332 static void
3333 maint_btrace_cmd (char *args, int from_tty)
3334 {
3335 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3336 gdb_stdout);
3337 }
3338
3339 /* The "maintenance set btrace" command. */
3340
3341 static void
3342 maint_btrace_set_cmd (char *args, int from_tty)
3343 {
3344 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3345 gdb_stdout);
3346 }
3347
3348 /* The "maintenance show btrace" command. */
3349
3350 static void
3351 maint_btrace_show_cmd (char *args, int from_tty)
3352 {
3353 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3354 all_commands, gdb_stdout);
3355 }
3356
3357 /* The "maintenance set btrace pt" command. */
3358
3359 static void
3360 maint_btrace_pt_set_cmd (char *args, int from_tty)
3361 {
3362 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3363 all_commands, gdb_stdout);
3364 }
3365
3366 /* The "maintenance show btrace pt" command. */
3367
3368 static void
3369 maint_btrace_pt_show_cmd (char *args, int from_tty)
3370 {
3371 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3372 all_commands, gdb_stdout);
3373 }
3374
3375 /* The "maintenance info btrace" command. */
3376
3377 static void
3378 maint_info_btrace_cmd (char *args, int from_tty)
3379 {
3380 struct btrace_thread_info *btinfo;
3381 struct thread_info *tp;
3382 const struct btrace_config *conf;
3383
3384 if (args != NULL && *args != 0)
3385 error (_("Invalid argument."));
3386
3387 tp = find_thread_ptid (inferior_ptid);
3388 if (tp == NULL)
3389 error (_("No thread."));
3390
3391 btinfo = &tp->btrace;
3392
3393 conf = btrace_conf (btinfo);
3394 if (conf == NULL)
3395 error (_("No btrace configuration."));
3396
3397 printf_unfiltered (_("Format: %s.\n"),
3398 btrace_format_string (conf->format));
3399
3400 switch (conf->format)
3401 {
3402 default:
3403 break;
3404
3405 case BTRACE_FORMAT_BTS:
3406 printf_unfiltered (_("Number of packets: %u.\n"),
3407 VEC_length (btrace_block_s,
3408 btinfo->data.variant.bts.blocks));
3409 break;
3410
3411 #if defined (HAVE_LIBIPT)
3412 case BTRACE_FORMAT_PT:
3413 {
3414 struct pt_version version;
3415
3416 version = pt_library_version ();
3417 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3418 version.minor, version.build,
3419 version.ext != NULL ? version.ext : "");
3420
3421 btrace_maint_update_pt_packets (btinfo);
3422 printf_unfiltered (_("Number of packets: %u.\n"),
3423 VEC_length (btrace_pt_packet_s,
3424 btinfo->maint.variant.pt.packets));
3425 }
3426 break;
3427 #endif /* defined (HAVE_LIBIPT) */
3428 }
3429 }
3430
3431 /* The "maint show btrace pt skip-pad" show value function. */
3432
3433 static void
3434 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3435 struct cmd_list_element *c,
3436 const char *value)
3437 {
3438 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3439 }
3440
3441
3442 /* Initialize btrace maintenance commands. */
3443
3444 void _initialize_btrace (void);
3445 void
3446 _initialize_btrace (void)
3447 {
3448 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3449 _("Info about branch tracing data."), &maintenanceinfolist);
3450
3451 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3452 _("Branch tracing maintenance commands."),
3453 &maint_btrace_cmdlist, "maintenance btrace ",
3454 0, &maintenancelist);
3455
3456 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3457 Set branch tracing specific variables."),
3458 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3459 0, &maintenance_set_cmdlist);
3460
3461 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3462 Set Intel Processor Trace specific variables."),
3463 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3464 0, &maint_btrace_set_cmdlist);
3465
3466 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3467 Show branch tracing specific variables."),
3468 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3469 0, &maintenance_show_cmdlist);
3470
3471 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3472 Show Intel Processor Trace specific variables."),
3473 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3474 0, &maint_btrace_show_cmdlist);
3475
3476 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3477 &maint_btrace_pt_skip_pad, _("\
3478 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3479 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3480 When enabled, PAD packets are ignored in the btrace packet history."),
3481 NULL, show_maint_btrace_pt_skip_pad,
3482 &maint_btrace_pt_set_cmdlist,
3483 &maint_btrace_pt_show_cmdlist);
3484
3485 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3486 _("Print the raw branch tracing data.\n\
3487 With no argument, print ten more packets after the previous ten-line print.\n\
3488 With '-' as argument, print ten packets before the previous ten-line print.\n\
3489 One argument specifies the starting packet of a ten-line print.\n\
3490 Two arguments separated by a comma specify the starting and ending \
3491 packets to print.\n\
3492 When preceded by '+' or '-', the second argument specifies the distance \
3493 from the first.\n"),
3494 &maint_btrace_cmdlist);
3495
3496 add_cmd ("clear-packet-history", class_maintenance,
3497 maint_btrace_clear_packet_history_cmd,
3498 _("Clears the branch tracing packet history.\n\
3499 Discards the raw branch tracing data but not the execution history data.\n\
3500 "),
3501 &maint_btrace_cmdlist);
3502
3503 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3504 _("Clears the branch tracing data.\n\
3505 Discards the raw branch tracing data and the execution history data.\n\
3506 The next 'record' command will fetch the branch tracing data anew.\n\
3507 "),
3508 &maint_btrace_cmdlist);
3509
3510 }