1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
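/* A minimal usage sketch (illustrative only, not part of this file): such a
   vector is managed with GDB's VEC API, as done throughout this file.  The
   names GAPS and GAP mirror locals used further below; SOME_SEGMENT stands
   for any struct btrace_function pointer.

     VEC (bfun_s) *gaps = NULL;
     struct btrace_function *gap;
     unsigned int idx;

     VEC_safe_push (bfun_s, gaps, some_segment);
     for (idx = 0; VEC_iterate (bfun_s, gaps, idx, gap); ++idx)
       ftrace_debug (gap, "gap");
     VEC_free (bfun_s, gaps);
*/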
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
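/* An illustrative example (not part of this file) of why the macro body is
   wrapped in do ... while (0): with a bare { ... } block, the ';' written
   after the macro call would terminate an enclosing if statement and leave a
   following else dangling.

     if (record_debug != 0)
       DEBUG ("resumed");
     else
       something_else ();

   The do ... while (0) form consumes exactly one ';', so the else stays
   attached.  SOMETHING_ELSE is a hypothetical placeholder.  */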
71
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
88 return MSYMBOL_PRINT_NAME (msym);
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
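/* For illustration: with "set debug record 1", a call such as
   ftrace_debug (bfun, "new call") for a segment of function "foo" in foo.c
   at level 1 covering trace instructions 10..19 would log a line like

     [btrace] [ftrace] new call: fun = foo, file = foo.c, level = 1, insn = [10; 20)

   The values are hypothetical; they depend on the recorded trace.  */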
143
144 /* Return non-zero if BFUN does not match MFUN and FUN;
145 return zero otherwise. */
146
147 static int
148 ftrace_function_switched (const struct btrace_function *bfun,
149 const struct minimal_symbol *mfun,
150 const struct symbol *fun)
151 {
152 struct minimal_symbol *msym;
153 struct symbol *sym;
154
155 msym = bfun->msym;
156 sym = bfun->sym;
157
158 /* If the minimal symbol changed, we certainly switched functions. */
159 if (mfun != NULL && msym != NULL
160 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
161 return 1;
162
163 /* If the symbol changed, we certainly switched functions. */
164 if (fun != NULL && sym != NULL)
165 {
166 const char *bfname, *fname;
167
168 /* Check the function name. */
169 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
170 return 1;
171
172 /* Check the location of those functions, as well. */
173 bfname = symtab_to_fullname (symbol_symtab (sym));
174 fname = symtab_to_fullname (symbol_symtab (fun));
175 if (filename_cmp (fname, bfname) != 0)
176 return 1;
177 }
178
179 /* If we lost symbol information, we switched functions. */
180 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
181 return 1;
182
183 /* If we gained symbol information, we switched functions. */
184 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
185 return 1;
186
187 return 0;
188 }
189
190 /* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function. */
193
194 static struct btrace_function *
195 ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
197 struct symbol *fun)
198 {
199 struct btrace_function *bfun;
200
201 bfun = XCNEW (struct btrace_function);
202
203 bfun->msym = mfun;
204 bfun->sym = fun;
205 bfun->flow.prev = prev;
206
207 if (prev == NULL)
208 {
209 /* Start counting at one. */
210 bfun->number = 1;
211 bfun->insn_offset = 1;
212 }
213 else
214 {
215 gdb_assert (prev->flow.next == NULL);
216 prev->flow.next = bfun;
217
218 bfun->number = prev->number + 1;
219 bfun->insn_offset = (prev->insn_offset
220 + VEC_length (btrace_insn_s, prev->insn));
221 bfun->level = prev->level;
222 }
223
224 return bfun;
225 }
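/* A worked example (illustrative): if the first segment receives 3
   instructions and the second receives 2, consecutive segments get

     segment 1: number = 1, insn_offset = 1
     segment 2: number = 2, insn_offset = 4   (1 + 3)
     segment 3: number = 3, insn_offset = 6   (4 + 2)

   i.e. INSN_OFFSET numbers the trace's instructions 1-based across all
   segments.  */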
226
227 /* Update the UP field of a function segment. */
228
229 static void
230 ftrace_update_caller (struct btrace_function *bfun,
231 struct btrace_function *caller,
232 enum btrace_function_flag flags)
233 {
234 if (bfun->up != NULL)
235 ftrace_debug (bfun, "updating caller");
236
237 bfun->up = caller;
238 bfun->flags = flags;
239
240 ftrace_debug (bfun, "set caller");
241 ftrace_debug (caller, "..to");
242 }
243
244 /* Fix up the caller for all segments of a function. */
245
246 static void
247 ftrace_fixup_caller (struct btrace_function *bfun,
248 struct btrace_function *caller,
249 enum btrace_function_flag flags)
250 {
251 struct btrace_function *prev, *next;
252
253 ftrace_update_caller (bfun, caller, flags);
254
255 /* Update all function segments belonging to the same function. */
256 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
257 ftrace_update_caller (prev, caller, flags);
258
259 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
260 ftrace_update_caller (next, caller, flags);
261 }
262
263 /* Add a new function segment for a call.
264 CALLER is the chronologically preceding function segment.
265 MFUN and FUN are the symbol information we have for this function. */
266
267 static struct btrace_function *
268 ftrace_new_call (struct btrace_function *caller,
269 struct minimal_symbol *mfun,
270 struct symbol *fun)
271 {
272 struct btrace_function *bfun;
273
274 bfun = ftrace_new_function (caller, mfun, fun);
275 bfun->up = caller;
276 bfun->level += 1;
277
278 ftrace_debug (bfun, "new call");
279
280 return bfun;
281 }
282
283 /* Add a new function segment for a tail call.
284 CALLER is the chronologically preceding function segment.
285 MFUN and FUN are the symbol information we have for this function. */
286
287 static struct btrace_function *
288 ftrace_new_tailcall (struct btrace_function *caller,
289 struct minimal_symbol *mfun,
290 struct symbol *fun)
291 {
292 struct btrace_function *bfun;
293
294 bfun = ftrace_new_function (caller, mfun, fun);
295 bfun->up = caller;
296 bfun->level += 1;
297 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
298
299 ftrace_debug (bfun, "new tail call");
300
301 return bfun;
302 }
303
304 /* Return the caller of BFUN or NULL if there is none. This function skips
305 tail calls in the call chain. */
306 static struct btrace_function *
307 ftrace_get_caller (struct btrace_function *bfun)
308 {
309 for (; bfun != NULL; bfun = bfun->up)
310 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
311 return bfun->up;
312
313 return NULL;
314 }
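/* For example (illustrative): if F calls G and G tail-calls H, then H's UP
   link points to G and carries BFUN_UP_LINKS_TO_TAILCALL.  Asking for H's
   caller therefore skips G and yields F, the function H will actually
   return to.  */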
315
316 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
317 symbol information. */
318
319 static struct btrace_function *
320 ftrace_find_caller (struct btrace_function *bfun,
321 struct minimal_symbol *mfun,
322 struct symbol *fun)
323 {
324 for (; bfun != NULL; bfun = bfun->up)
325 {
326 /* Skip functions with incompatible symbol information. */
327 if (ftrace_function_switched (bfun, mfun, fun))
328 continue;
329
330 /* This is the function segment we're looking for. */
331 break;
332 }
333
334 return bfun;
335 }
336
337 /* Find the innermost caller in the back trace of BFUN, skipping all
338 function segments that do not end with a call instruction (e.g.
339 tail calls ending with a jump). */
340
341 static struct btrace_function *
342 ftrace_find_call (struct btrace_function *bfun)
343 {
344 for (; bfun != NULL; bfun = bfun->up)
345 {
346 struct btrace_insn *last;
347
348 /* Skip gaps. */
349 if (bfun->errcode != 0)
350 continue;
351
352 last = VEC_last (btrace_insn_s, bfun->insn);
353
354 if (last->iclass == BTRACE_INSN_CALL)
355 break;
356 }
357
358 return bfun;
359 }
360
361 /* Add a continuation segment for a function into which we return.
362 PREV is the chronologically preceding function segment.
363 MFUN and FUN are the symbol information we have for this function. */
364
365 static struct btrace_function *
366 ftrace_new_return (struct btrace_function *prev,
367 struct minimal_symbol *mfun,
368 struct symbol *fun)
369 {
370 struct btrace_function *bfun, *caller;
371
372 bfun = ftrace_new_function (prev, mfun, fun);
373
374 /* It is important to start at PREV's caller. Otherwise, we might find
375 PREV itself, if PREV is a recursive function. */
376 caller = ftrace_find_caller (prev->up, mfun, fun);
377 if (caller != NULL)
378 {
379 /* The caller of PREV is the preceding btrace function segment in this
380 function instance. */
381 gdb_assert (caller->segment.next == NULL);
382
383 caller->segment.next = bfun;
384 bfun->segment.prev = caller;
385
386 /* Maintain the function level. */
387 bfun->level = caller->level;
388
389 /* Maintain the call stack. */
390 bfun->up = caller->up;
391 bfun->flags = caller->flags;
392
393 ftrace_debug (bfun, "new return");
394 }
395 else
396 {
397 /* We did not find a caller. This could mean that something went
398 wrong or that the call is simply not included in the trace. */
399
400 /* Let's search for some actual call. */
401 caller = ftrace_find_call (prev->up);
402 if (caller == NULL)
403 {
404 /* There is no call in PREV's back trace. We assume that the
405 branch trace did not include it. */
406
407 /* Let's find the topmost function and add a new caller for it.
408 This should handle a series of initial tail calls. */
409 while (prev->up != NULL)
410 prev = prev->up;
411
412 bfun->level = prev->level - 1;
413
414 /* Fix up the call stack for PREV. */
415 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
416
417 ftrace_debug (bfun, "new return - no caller");
418 }
419 else
420 {
421 /* There is a call in PREV's back trace to which we should have
422 returned but didn't. Let's start a new, separate back trace
423 from PREV's level. */
424 bfun->level = prev->level - 1;
425
426 /* We fix up the back trace for PREV but leave other function segments
427 on the same level as they are.
428 This should handle things like schedule () correctly where we're
429 switching contexts. */
430 prev->up = bfun;
431 prev->flags = BFUN_UP_LINKS_TO_RET;
432
433 ftrace_debug (bfun, "new return - unknown caller");
434 }
435 }
436
437 return bfun;
438 }
439
440 /* Add a new function segment for a function switch.
441 PREV is the chronologically preceding function segment.
442 MFUN and FUN are the symbol information we have for this function. */
443
444 static struct btrace_function *
445 ftrace_new_switch (struct btrace_function *prev,
446 struct minimal_symbol *mfun,
447 struct symbol *fun)
448 {
449 struct btrace_function *bfun;
450
451 /* This is an unexplained function switch. We can't really be sure about the
452 call stack; the best we can do right now is to preserve it. */
453 bfun = ftrace_new_function (prev, mfun, fun);
454 bfun->up = prev->up;
455 bfun->flags = prev->flags;
456
457 ftrace_debug (bfun, "new switch");
458
459 return bfun;
460 }
461
462 /* Add a new function segment for a gap in the trace due to a decode error.
463 PREV is the chronologically preceding function segment.
464 ERRCODE is the format-specific error code. */
465
466 static struct btrace_function *
467 ftrace_new_gap (struct btrace_function *prev, int errcode)
468 {
469 struct btrace_function *bfun;
470
471 /* We hijack PREV if it was empty. */
472 if (prev != NULL && prev->errcode == 0
473 && VEC_empty (btrace_insn_s, prev->insn))
474 bfun = prev;
475 else
476 bfun = ftrace_new_function (prev, NULL, NULL);
477
478 bfun->errcode = errcode;
479
480 ftrace_debug (bfun, "new gap");
481
482 return bfun;
483 }
484
485 /* Update BFUN with respect to the instruction at PC. This may create new
486 function segments.
487 Return the chronologically latest function segment, never NULL. */
488
489 static struct btrace_function *
490 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
491 {
492 struct bound_minimal_symbol bmfun;
493 struct minimal_symbol *mfun;
494 struct symbol *fun;
495 struct btrace_insn *last;
496
497 /* Try to determine the function we're in. We use both types of symbols
498 to avoid surprises when we sometimes get a full symbol and sometimes
499 only a minimal symbol. */
500 fun = find_pc_function (pc);
501 bmfun = lookup_minimal_symbol_by_pc (pc);
502 mfun = bmfun.minsym;
503
504 if (fun == NULL && mfun == NULL)
505 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
506
507 /* If we didn't have a function or if we had a gap before, we create one. */
508 if (bfun == NULL || bfun->errcode != 0)
509 return ftrace_new_function (bfun, mfun, fun);
510
511 /* Check the last instruction, if we have one.
512 We do this check first, since it allows us to fill in the call stack
513 links in addition to the normal flow links. */
514 last = NULL;
515 if (!VEC_empty (btrace_insn_s, bfun->insn))
516 last = VEC_last (btrace_insn_s, bfun->insn);
517
518 if (last != NULL)
519 {
520 switch (last->iclass)
521 {
522 case BTRACE_INSN_RETURN:
523 {
524 const char *fname;
525
526 /* On some systems, _dl_runtime_resolve returns to the resolved
527 function instead of jumping to it. From our perspective,
528 however, this is a tailcall.
529 If we treated it as return, we wouldn't be able to find the
530 resolved function in our stack back trace. Hence, we would
531 lose the current stack back trace and start anew with an empty
532 back trace. When the resolved function returns, we would then
533 create a stack back trace with the same function names but
534 different frame id's. This will confuse stepping. */
535 fname = ftrace_print_function_name (bfun);
536 if (strcmp (fname, "_dl_runtime_resolve") == 0)
537 return ftrace_new_tailcall (bfun, mfun, fun);
538
539 return ftrace_new_return (bfun, mfun, fun);
540 }
541
542 case BTRACE_INSN_CALL:
543 /* Ignore calls to the next instruction. They are used for
544 position-independent code (PIC). */
544 if (last->pc + last->size == pc)
545 break;
546
547 return ftrace_new_call (bfun, mfun, fun);
548
549 case BTRACE_INSN_JUMP:
550 {
551 CORE_ADDR start;
552
553 start = get_pc_function_start (pc);
554
555 /* A jump to the start of a function is (typically) a tail call. */
556 if (start == pc)
557 return ftrace_new_tailcall (bfun, mfun, fun);
558
559 /* If we can't determine the function for PC, we treat a jump at
560 the end of the block as tail call if we're switching functions
561 and as an intra-function branch if we don't. */
562 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
563 return ftrace_new_tailcall (bfun, mfun, fun);
564
565 break;
566 }
567 }
568 }
569
570 /* Check if we're switching functions for some other reason. */
571 if (ftrace_function_switched (bfun, mfun, fun))
572 {
573 DEBUG_FTRACE ("switching from %s in %s at %s",
574 ftrace_print_insn_addr (last),
575 ftrace_print_function_name (bfun),
576 ftrace_print_filename (bfun));
577
578 return ftrace_new_switch (bfun, mfun, fun);
579 }
580
581 return bfun;
582 }
583
584 /* Add the instruction INSN to BFUN's instructions. */
585
586 static void
587 ftrace_update_insns (struct btrace_function *bfun,
588 const struct btrace_insn *insn)
589 {
590 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
591
592 if (record_debug > 1)
593 ftrace_debug (bfun, "update insn");
594 }
595
596 /* Classify the instruction at PC. */
597
598 static enum btrace_insn_class
599 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
600 {
601 enum btrace_insn_class iclass;
602
603 iclass = BTRACE_INSN_OTHER;
604 TRY
605 {
606 if (gdbarch_insn_is_call (gdbarch, pc))
607 iclass = BTRACE_INSN_CALL;
608 else if (gdbarch_insn_is_ret (gdbarch, pc))
609 iclass = BTRACE_INSN_RETURN;
610 else if (gdbarch_insn_is_jump (gdbarch, pc))
611 iclass = BTRACE_INSN_JUMP;
612 }
613 CATCH (error, RETURN_MASK_ERROR)
614 {
615 }
616 END_CATCH
617
618 return iclass;
619 }
620
621 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
622 number of matching function segments or zero if the back traces do not
623 match. */
624
625 static int
626 ftrace_match_backtrace (struct btrace_function *lhs,
627 struct btrace_function *rhs)
628 {
629 int matches;
630
631 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
632 {
633 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
634 return 0;
635
636 lhs = ftrace_get_caller (lhs);
637 rhs = ftrace_get_caller (rhs);
638 }
639
640 return matches;
641 }
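/* A worked example (illustrative): for the back traces

     LHS: foo <- bar <- main
     RHS: foo <- bar <- main <- _start

   the symbols match for three iterations before LHS runs out, so the result
   is 3.  Any mismatch, say bar vs. baz, yields 0 immediately.  */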
642
643 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
644
645 static void
646 ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
647 {
648 if (adjustment == 0)
649 return;
650
651 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
652 ftrace_debug (bfun, "..bfun");
653
654 for (; bfun != NULL; bfun = bfun->flow.next)
655 bfun->level += adjustment;
656 }
657
658 /* Recompute the global level offset. Traverse the function trace and compute
659 the global level offset as the negative of the minimal function level. */
660
661 static void
662 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
663 {
664 struct btrace_function *bfun, *end;
665 int level;
666
667 if (btinfo == NULL)
668 return;
669
670 bfun = btinfo->begin;
671 if (bfun == NULL)
672 return;
673
674 /* The last function segment contains the current instruction, which is not
675 really part of the trace. If it contains just this one instruction, we
676 stop when we reach it; otherwise, we let the loop below run to the end. */
677 end = btinfo->end;
678 if (VEC_length (btrace_insn_s, end->insn) > 1)
679 end = NULL;
680
681 level = INT_MAX;
682 for (; bfun != end; bfun = bfun->flow.next)
683 level = std::min (level, bfun->level);
684
685 DEBUG_FTRACE ("setting global level offset: %d", -level);
686 btinfo->level = -level;
687 }
688
689 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
690 ftrace_connect_backtrace. */
691
692 static void
693 ftrace_connect_bfun (struct btrace_function *prev,
694 struct btrace_function *next)
695 {
696 DEBUG_FTRACE ("connecting...");
697 ftrace_debug (prev, "..prev");
698 ftrace_debug (next, "..next");
699
700 /* The function segments are not yet connected. */
701 gdb_assert (prev->segment.next == NULL);
702 gdb_assert (next->segment.prev == NULL);
703
704 prev->segment.next = next;
705 next->segment.prev = prev;
706
707 /* We may have moved NEXT to a different function level. */
708 ftrace_fixup_level (next, prev->level - next->level);
709
710 /* If we run out of back trace for one, let's use the other's. */
711 if (prev->up == NULL)
712 {
713 if (next->up != NULL)
714 {
715 DEBUG_FTRACE ("using next's callers");
716 ftrace_fixup_caller (prev, next->up, next->flags);
717 }
718 }
719 else if (next->up == NULL)
720 {
721 if (prev->up != NULL)
722 {
723 DEBUG_FTRACE ("using prev's callers");
724 ftrace_fixup_caller (next, prev->up, prev->flags);
725 }
726 }
727 else
728 {
729 /* PREV may have a tailcall caller, NEXT can't. If it does, fix up the up
730 link to add the tail callers to NEXT's back trace.
731
732 This removes NEXT->UP from NEXT's back trace. It will be added back
733 when connecting NEXT and PREV's callers - provided they exist.
734
735 If PREV's back trace consists of a series of tail calls without an
736 actual call, there will be no further connection and NEXT's caller will
737 be removed for good. To catch this case, we handle it here and connect
738 the top of PREV's back trace to NEXT's caller. */
739 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
740 {
741 struct btrace_function *caller;
742 btrace_function_flags flags;
743
744 /* We checked NEXT->UP above so CALLER can't be NULL. */
745 caller = next->up;
746 flags = next->flags;
747
748 DEBUG_FTRACE ("adding prev's tail calls to next");
749
750 ftrace_fixup_caller (next, prev->up, prev->flags);
751
752 for (prev = prev->up; prev != NULL; prev = prev->up)
753 {
754 /* At the end of PREV's back trace, continue with CALLER. */
755 if (prev->up == NULL)
756 {
757 DEBUG_FTRACE ("fixing up link for tailcall chain");
758 ftrace_debug (prev, "..top");
759 ftrace_debug (caller, "..up");
760
761 ftrace_fixup_caller (prev, caller, flags);
762
763 /* If we skipped any tail calls, this may move CALLER to a
764 different function level.
765
766 Note that changing CALLER's level is only OK because we
767 know that this is the last iteration of the bottom-to-top
768 walk in ftrace_connect_backtrace.
769
770 Otherwise we will fix up CALLER's level when we connect it
771 to PREV's caller in the next iteration. */
772 ftrace_fixup_level (caller, prev->level - caller->level - 1);
773 break;
774 }
775
776 /* There's nothing to do if we find a real call. */
777 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
778 {
779 DEBUG_FTRACE ("will fix up link in next iteration");
780 break;
781 }
782 }
783 }
784 }
785 }
786
787 /* Connect function segments on the same level in the back trace at LHS and RHS.
788 The back traces at LHS and RHS are expected to match according to
789 ftrace_match_backtrace. */
790
791 static void
792 ftrace_connect_backtrace (struct btrace_function *lhs,
793 struct btrace_function *rhs)
794 {
795 while (lhs != NULL && rhs != NULL)
796 {
797 struct btrace_function *prev, *next;
798
799 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
800
801 /* Connecting LHS and RHS may change the up link. */
802 prev = lhs;
803 next = rhs;
804
805 lhs = ftrace_get_caller (lhs);
806 rhs = ftrace_get_caller (rhs);
807
808 ftrace_connect_bfun (prev, next);
809 }
810 }
811
812 /* Bridge the gap between two function segments left and right of a gap if their
813 respective back traces match in at least MIN_MATCHES functions.
814
815 Returns non-zero if the gap could be bridged, zero otherwise. */
816
817 static int
818 ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
819 int min_matches)
820 {
821 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
822 int best_matches;
823
824 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
825 rhs->insn_offset - 1, min_matches);
826
827 best_matches = 0;
828 best_l = NULL;
829 best_r = NULL;
830
831 /* We search the back traces of LHS and RHS for valid connections and connect
832 the two function segments that give the longest combined back trace. */
833
834 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
835 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
836 {
837 int matches;
838
839 matches = ftrace_match_backtrace (cand_l, cand_r);
840 if (best_matches < matches)
841 {
842 best_matches = matches;
843 best_l = cand_l;
844 best_r = cand_r;
845 }
846 }
847
848 /* We need at least MIN_MATCHES matches. */
849 gdb_assert (min_matches > 0);
850 if (best_matches < min_matches)
851 return 0;
852
853 DEBUG_FTRACE ("..matches: %d", best_matches);
854
855 /* We will fix up the level of BEST_R and succeeding function segments such
856 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
857
858 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
859 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
860
861 To catch this, we already fix up the level here where we can start at RHS
862 instead of at BEST_R. We will ignore the level fixup when connecting
863 BEST_L to BEST_R as they will already be on the same level. */
864 ftrace_fixup_level (rhs, best_l->level - best_r->level);
865
866 ftrace_connect_backtrace (best_l, best_r);
867
868 return best_matches;
869 }
870
871 /* Try to bridge gaps due to overflow or decode errors by connecting the
872 function segments that are separated by the gap. */
873
874 static void
875 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
876 {
877 VEC (bfun_s) *remaining;
878 struct cleanup *old_chain;
879 int min_matches;
880
881 DEBUG ("bridge gaps");
882
883 remaining = NULL;
884 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
885
886 /* We require a minimum number of matches for bridging a gap. The number of
887 required matches will be lowered with each iteration.
888
889 The more matches the higher our confidence that the bridging is correct.
890 For big gaps or small traces, however, it may not be feasible to require a
891 high number of matches. */
892 for (min_matches = 5; min_matches > 0; --min_matches)
893 {
894 /* Let's try to bridge as many gaps as we can. In some cases, we need to
895 skip a gap and revisit it again after we closed later gaps. */
896 while (!VEC_empty (bfun_s, *gaps))
897 {
898 struct btrace_function *gap;
899 unsigned int idx;
900
901 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
902 {
903 struct btrace_function *lhs, *rhs;
904 int bridged;
905
906 /* We may have a sequence of gaps if we run from one error into
907 the next as we try to re-sync onto the trace stream. Ignore
908 all but the leftmost gap in such a sequence.
909
910 Also ignore gaps at the beginning of the trace. */
911 lhs = gap->flow.prev;
912 if (lhs == NULL || lhs->errcode != 0)
913 continue;
914
915 /* Skip gaps to the right. */
916 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
917 if (rhs->errcode == 0)
918 break;
919
920 /* Ignore gaps at the end of the trace. */
921 if (rhs == NULL)
922 continue;
923
924 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
925
926 /* Keep track of gaps we were not able to bridge and try again.
927 If we just pushed them to the end of GAPS we would risk an
928 infinite loop in case we simply cannot bridge a gap. */
929 if (bridged == 0)
930 VEC_safe_push (bfun_s, remaining, gap);
931 }
932
933 /* Let's see if we made any progress. */
934 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
935 break;
936
937 VEC_free (bfun_s, *gaps);
938
939 *gaps = remaining;
940 remaining = NULL;
941 }
942
943 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
944 if (VEC_empty (bfun_s, *gaps))
945 break;
946
947 VEC_free (bfun_s, remaining);
948 }
949
950 do_cleanups (old_chain);
951
952 /* We may omit this in some cases. Not sure it is worth the extra
953 complication, though. */
954 ftrace_compute_global_level_offset (&tp->btrace);
955 }
956
957 /* Compute the function branch trace from BTS trace. */
958
959 static void
960 btrace_compute_ftrace_bts (struct thread_info *tp,
961 const struct btrace_data_bts *btrace,
962 VEC (bfun_s) **gaps)
963 {
964 struct btrace_thread_info *btinfo;
965 struct btrace_function *begin, *end;
966 struct gdbarch *gdbarch;
967 unsigned int blk;
968 int level;
969
970 gdbarch = target_gdbarch ();
971 btinfo = &tp->btrace;
972 begin = btinfo->begin;
973 end = btinfo->end;
974 level = begin != NULL ? -btinfo->level : INT_MAX;
975 blk = VEC_length (btrace_block_s, btrace->blocks);
976
977 while (blk != 0)
978 {
979 btrace_block_s *block;
980 CORE_ADDR pc;
981
982 blk -= 1;
983
984 block = VEC_index (btrace_block_s, btrace->blocks, blk);
985 pc = block->begin;
986
987 for (;;)
988 {
989 struct btrace_insn insn;
990 int size;
991
992 /* We should hit the end of the block. Warn if we went too far. */
993 if (block->end < pc)
994 {
995 /* Indicate the gap in the trace. */
996 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
997 if (begin == NULL)
998 begin = end;
999
1000 VEC_safe_push (bfun_s, *gaps, end);
1001
1002 warning (_("Recorded trace may be corrupted at instruction "
1003 "%u (pc = %s)."), end->insn_offset - 1,
1004 core_addr_to_string_nz (pc));
1005
1006 break;
1007 }
1008
1009 end = ftrace_update_function (end, pc);
1010 if (begin == NULL)
1011 begin = end;
1012
1013 /* Maintain the function level offset.
1014 For all but the last block, we do it here. */
1015 if (blk != 0)
1016 level = std::min (level, end->level);
1017
1018 size = 0;
1019 TRY
1020 {
1021 size = gdb_insn_length (gdbarch, pc);
1022 }
1023 CATCH (error, RETURN_MASK_ERROR)
1024 {
1025 }
1026 END_CATCH
1027
1028 insn.pc = pc;
1029 insn.size = size;
1030 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1031 insn.flags = 0;
1032
1033 ftrace_update_insns (end, &insn);
1034
1035 /* We're done once we pushed the instruction at the end. */
1036 if (block->end == pc)
1037 break;
1038
1039 /* We can't continue if we fail to compute the size. */
1040 if (size <= 0)
1041 {
1042 /* Indicate the gap in the trace. We just added INSN so we're
1043 not at the beginning. */
1044 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
1045
1046 VEC_safe_push (bfun_s, *gaps, end);
1047
1048 warning (_("Recorded trace may be incomplete at instruction %u "
1049 "(pc = %s)."), end->insn_offset - 1,
1050 core_addr_to_string_nz (pc));
1051
1052 break;
1053 }
1054
1055 pc += size;
1056
1057 /* Maintain the function level offset.
1058 For the last block, we do it here to not consider the last
1059 instruction.
1060 Since the last instruction corresponds to the current instruction
1061 and is not really part of the execution history, it shouldn't
1062 affect the level. */
1063 if (blk == 0)
1064 level = std::min (level, end->level);
1065 }
1066 }
1067
1068 btinfo->begin = begin;
1069 btinfo->end = end;
1070
1071 /* LEVEL is the minimal function level of all btrace function segments.
1072 Define the global level offset to -LEVEL so all function levels are
1073 normalized to start at zero. */
1074 btinfo->level = -level;
1075 }
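/* For illustration (hypothetical addresses): BTS stores the most recent
   block first, so a thread that executed instructions 0x100..0x108 and then
   0x200..0x204 delivers

     blocks[0] = { begin = 0x200, end = 0x204 }
     blocks[1] = { begin = 0x100, end = 0x108 }

   which is why the loop above walks BLK from the end of the vector down to
   0, processing blocks in chronological order.  */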
1076
1077 #if defined (HAVE_LIBIPT)
1078
1079 static enum btrace_insn_class
1080 pt_reclassify_insn (enum pt_insn_class iclass)
1081 {
1082 switch (iclass)
1083 {
1084 case ptic_call:
1085 return BTRACE_INSN_CALL;
1086
1087 case ptic_return:
1088 return BTRACE_INSN_RETURN;
1089
1090 case ptic_jump:
1091 return BTRACE_INSN_JUMP;
1092
1093 default:
1094 return BTRACE_INSN_OTHER;
1095 }
1096 }
1097
1098 /* Return the btrace instruction flags for INSN. */
1099
1100 static btrace_insn_flags
1101 pt_btrace_insn_flags (const struct pt_insn *insn)
1102 {
1103 btrace_insn_flags flags = 0;
1104
1105 if (insn->speculative)
1106 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1107
1108 return flags;
1109 }
1110
1111 /* Add function branch trace using DECODER. */
1112
1113 static void
1114 ftrace_add_pt (struct pt_insn_decoder *decoder,
1115 struct btrace_function **pbegin,
1116 struct btrace_function **pend, int *plevel,
1117 VEC (bfun_s) **gaps)
1118 {
1119 struct btrace_function *begin, *end, *upd;
1120 uint64_t offset;
1121 int errcode;
1122
1123 begin = *pbegin;
1124 end = *pend;
1125 for (;;)
1126 {
1127 struct btrace_insn btinsn;
1128 struct pt_insn insn;
1129
1130 errcode = pt_insn_sync_forward (decoder);
1131 if (errcode < 0)
1132 {
1133 if (errcode != -pte_eos)
1134 warning (_("Failed to synchronize onto the Intel Processor "
1135 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1136 break;
1137 }
1138
1139 memset (&btinsn, 0, sizeof (btinsn));
1140 for (;;)
1141 {
1142 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1143 if (errcode < 0)
1144 break;
1145
1146 /* Look for gaps in the trace - unless we're at the beginning. */
1147 if (begin != NULL)
1148 {
1149 /* Tracing is disabled and re-enabled each time we enter the
1150 kernel. Most times, we continue from the same instruction we
1151 stopped before. This is indicated via the RESUMED instruction
1152 flag. The ENABLED instruction flag means that we continued
1153 from some other instruction. Indicate this as a trace gap. */
1154 if (insn.enabled)
1155 {
1156 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
1157
1158 VEC_safe_push (bfun_s, *gaps, end);
1159
1160 pt_insn_get_offset (decoder, &offset);
1161
1162 warning (_("Non-contiguous trace at instruction %u (offset "
1163 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1164 end->insn_offset - 1, offset, insn.ip);
1165 }
1166 }
1167
1168 /* Indicate trace overflows. */
1169 if (insn.resynced)
1170 {
1171 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
1172 if (begin == NULL)
1173 *pbegin = begin = end;
1174
1175 VEC_safe_push (bfun_s, *gaps, end);
1176
1177 pt_insn_get_offset (decoder, &offset);
1178
1179 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1180 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1181 offset, insn.ip);
1182 }
1183
1184 upd = ftrace_update_function (end, insn.ip);
1185 if (upd != end)
1186 {
1187 *pend = end = upd;
1188
1189 if (begin == NULL)
1190 *pbegin = begin = upd;
1191 }
1192
1193 /* Maintain the function level offset. */
1194 *plevel = std::min (*plevel, end->level);
1195
1196 btinsn.pc = (CORE_ADDR) insn.ip;
1197 btinsn.size = (gdb_byte) insn.size;
1198 btinsn.iclass = pt_reclassify_insn (insn.iclass);
1199 btinsn.flags = pt_btrace_insn_flags (&insn);
1200
1201 ftrace_update_insns (end, &btinsn);
1202 }
1203
1204 if (errcode == -pte_eos)
1205 break;
1206
1207 /* Indicate the gap in the trace. */
1208 *pend = end = ftrace_new_gap (end, errcode);
1209 if (begin == NULL)
1210 *pbegin = begin = end;
1211
1212 VEC_safe_push (bfun_s, *gaps, end);
1213
1214 pt_insn_get_offset (decoder, &offset);
1215
1216 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1217 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1218 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1219 }
1220 }
1221
1222 /* A callback function to allow the trace decoder to read the inferior's
1223 memory. */
1224
1225 static int
1226 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1227 const struct pt_asid *asid, uint64_t pc,
1228 void *context)
1229 {
1230 int result, errcode;
1231
1232 result = (int) size;
1233 TRY
1234 {
1235 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1236 if (errcode != 0)
1237 result = -pte_nomap;
1238 }
1239 CATCH (error, RETURN_MASK_ERROR)
1240 {
1241 result = -pte_nomap;
1242 }
1243 END_CATCH
1244
1245 return result;
1246 }
1247
1248 /* Translate the vendor from one enum to another. */
1249
1250 static enum pt_cpu_vendor
1251 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1252 {
1253 switch (vendor)
1254 {
1255 default:
1256 return pcv_unknown;
1257
1258 case CV_INTEL:
1259 return pcv_intel;
1260 }
1261 }
1262
1263 /* Finalize the function branch trace after decode. */
1264
1265 static void
1266 btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
struct thread_info *tp, int level)
1267 {
1268 pt_insn_free_decoder (decoder);
1269
1270 /* LEVEL is the minimal function level of all btrace function segments.
1271 Define the global level offset to -LEVEL so all function levels are
1272 normalized to start at zero. */
1273 tp->btrace.level = -level;
1274
1275 /* Add a single last instruction entry for the current PC.
1276 This allows us to compute the backtrace at the current PC using both
1277 standard unwind and btrace unwind.
1278 This extra entry is ignored by all record commands. */
1279 btrace_add_pc (tp);
1280 }
1281
1282 /* Compute the function branch trace from Intel Processor Trace
1283 format. */
1284
1285 static void
1286 btrace_compute_ftrace_pt (struct thread_info *tp,
1287 const struct btrace_data_pt *btrace,
1288 VEC (bfun_s) **gaps)
1289 {
1290 struct btrace_thread_info *btinfo;
1291 struct pt_insn_decoder *decoder;
1292 struct pt_config config;
1293 int level, errcode;
1294
1295 if (btrace->size == 0)
1296 return;
1297
1298 btinfo = &tp->btrace;
1299 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1300
1301 pt_config_init (&config);
1302 config.begin = btrace->data;
1303 config.end = btrace->data + btrace->size;
1304
1305 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1306 config.cpu.family = btrace->config.cpu.family;
1307 config.cpu.model = btrace->config.cpu.model;
1308 config.cpu.stepping = btrace->config.cpu.stepping;
1309
1310 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1311 if (errcode < 0)
1312 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1313 pt_errstr (pt_errcode (errcode)));
1314
1315 decoder = pt_insn_alloc_decoder (&config);
1316 if (decoder == NULL)
1317 error (_("Failed to allocate the Intel Processor Trace decoder."));
1318
1319 TRY
1320 {
1321 struct pt_image *image;
1322
1323 image = pt_insn_get_image (decoder);
1324 if (image == NULL)
1325 error (_("Failed to configure the Intel Processor Trace decoder."));
1326
1327 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1328 if (errcode < 0)
1329 error (_("Failed to configure the Intel Processor Trace decoder: "
1330 "%s."), pt_errstr (pt_errcode (errcode)));
1331
1332 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
1333 }
1334 CATCH (error, RETURN_MASK_ALL)
1335 {
1336 /* Indicate a gap in the trace if we quit trace processing. */
1337 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1338 {
1339 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
1340
1341 VEC_safe_push (bfun_s, *gaps, btinfo->end);
1342 }
1343
1344 btrace_finalize_ftrace_pt (decoder, tp, level);
1345
1346 throw_exception (error);
1347 }
1348 END_CATCH
1349
1350 btrace_finalize_ftrace_pt (decoder, tp, level);
1351 }
1352
1353 #else /* defined (HAVE_LIBIPT) */
1354
1355 static void
1356 btrace_compute_ftrace_pt (struct thread_info *tp,
1357 const struct btrace_data_pt *btrace,
1358 VEC (bfun_s) **gaps)
1359 {
1360 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1361 }
1362
1363 #endif /* defined (HAVE_LIBIPT) */
1364
1365 /* Compute the function branch trace from a block branch trace BTRACE for
1366 a thread given by BTINFO. */
1367
1368 static void
1369 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1370 VEC (bfun_s) **gaps)
1371 {
1372 DEBUG ("compute ftrace");
1373
1374 switch (btrace->format)
1375 {
1376 case BTRACE_FORMAT_NONE:
1377 return;
1378
1379 case BTRACE_FORMAT_BTS:
1380 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1381 return;
1382
1383 case BTRACE_FORMAT_PT:
1384 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1385 return;
1386 }
1387
1388 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1389 }
1390
1391 static void
1392 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1393 {
1394 if (!VEC_empty (bfun_s, *gaps))
1395 {
1396 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1397 btrace_bridge_gaps (tp, gaps);
1398 }
1399 }
1400
1401 static void
1402 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1403 {
1404 VEC (bfun_s) *gaps;
1405 struct cleanup *old_chain;
1406
1407 gaps = NULL;
1408 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1409
1410 TRY
1411 {
1412 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1413 }
1414 CATCH (error, RETURN_MASK_ALL)
1415 {
1416 btrace_finalize_ftrace (tp, &gaps);
1417
1418 throw_exception (error);
1419 }
1420 END_CATCH
1421
1422 btrace_finalize_ftrace (tp, &gaps);
1423
1424 do_cleanups (old_chain);
1425 }
1426
1427 /* Add an entry for the current PC. */
1428
1429 static void
1430 btrace_add_pc (struct thread_info *tp)
1431 {
1432 struct btrace_data btrace;
1433 struct btrace_block *block;
1434 struct regcache *regcache;
1435 struct cleanup *cleanup;
1436 CORE_ADDR pc;
1437
1438 regcache = get_thread_regcache (tp->ptid);
1439 pc = regcache_read_pc (regcache);
1440
1441 btrace_data_init (&btrace);
1442 btrace.format = BTRACE_FORMAT_BTS;
1443 btrace.variant.bts.blocks = NULL;
1444
1445 cleanup = make_cleanup_btrace_data (&btrace);
1446
1447 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1448 block->begin = pc;
1449 block->end = pc;
1450
1451 btrace_compute_ftrace (tp, &btrace);
1452
1453 do_cleanups (cleanup);
1454 }
1455
1456 /* See btrace.h. */
1457
1458 void
1459 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1460 {
1461 if (tp->btrace.target != NULL)
1462 return;
1463
1464 #if !defined (HAVE_LIBIPT)
1465 if (conf->format == BTRACE_FORMAT_PT)
1466 error (_("GDB does not support Intel Processor Trace."));
1467 #endif /* !defined (HAVE_LIBIPT) */
1468
1469 if (!target_supports_btrace (conf->format))
1470 error (_("Target does not support branch tracing."));
1471
1472 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1473 target_pid_to_str (tp->ptid));
1474
1475 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1476
1477 /* We're done if we failed to enable tracing. */
1478 if (tp->btrace.target == NULL)
1479 return;
1480
1481 /* We need to undo the enable in case of errors. */
1482 TRY
1483 {
1484 /* Add an entry for the current PC so we start tracing from where we
1485 enabled it.
1486
1487 If we can't access TP's registers, TP is most likely running. In this
1488 case, we can't really say where tracing was enabled so it should be
1489 safe to simply skip this step.
1490
1491 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1492 start at the PC at which tracing was enabled. */
1493 if (conf->format != BTRACE_FORMAT_PT
1494 && can_access_registers_ptid (tp->ptid))
1495 btrace_add_pc (tp);
1496 }
1497 CATCH (exception, RETURN_MASK_ALL)
1498 {
1499 btrace_disable (tp);
1500
1501 throw_exception (exception);
1502 }
1503 END_CATCH
1504 }
1505
1506 /* See btrace.h. */
1507
1508 const struct btrace_config *
1509 btrace_conf (const struct btrace_thread_info *btinfo)
1510 {
1511 if (btinfo->target == NULL)
1512 return NULL;
1513
1514 return target_btrace_conf (btinfo->target);
1515 }
1516
1517 /* See btrace.h. */
1518
1519 void
1520 btrace_disable (struct thread_info *tp)
1521 {
1522 struct btrace_thread_info *btp = &tp->btrace;
1524
1525 if (btp->target == NULL)
1526 return;
1527
1528 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1529 target_pid_to_str (tp->ptid));
1530
1531 target_disable_btrace (btp->target);
1532 btp->target = NULL;
1533
1534 btrace_clear (tp);
1535 }
1536
1537 /* See btrace.h. */
1538
1539 void
1540 btrace_teardown (struct thread_info *tp)
1541 {
1542 struct btrace_thread_info *btp = &tp->btrace;
1544
1545 if (btp->target == NULL)
1546 return;
1547
1548 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1549 target_pid_to_str (tp->ptid));
1550
1551 target_teardown_btrace (btp->target);
1552 btp->target = NULL;
1553
1554 btrace_clear (tp);
1555 }
1556
1557 /* Stitch branch trace in BTS format. */
1558
1559 static int
1560 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1561 {
1562 struct btrace_thread_info *btinfo;
1563 struct btrace_function *last_bfun;
1564 struct btrace_insn *last_insn;
1565 btrace_block_s *first_new_block;
1566
1567 btinfo = &tp->btrace;
1568 last_bfun = btinfo->end;
1569 gdb_assert (last_bfun != NULL);
1570 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1571
1572 /* If the existing trace ends with a gap, we just glue the traces
1573 together. We need to drop the last (i.e. chronologically first) block
1574 of the new trace, though, since we can't fill in the start address. */
1575 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1576 {
1577 VEC_pop (btrace_block_s, btrace->blocks);
1578 return 0;
1579 }
1580
1581 /* Beware that block trace starts with the most recent block, so the
1582 chronologically first block in the new trace is the last block in
1583 the new trace's block vector. */
1584 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1585 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1586
1587 /* If the current PC at the end of the block is the same as in our current
1588 trace, there are two explanations:
1589 1. we executed the instruction and some branch brought us back.
1590 2. we have not made any progress.
1591 In the first case, the delta trace vector should contain at least two
1592 entries.
1593 In the second case, the delta trace vector should contain exactly one
1594 entry for the partial block containing the current PC. Remove it. */
1595 if (first_new_block->end == last_insn->pc
1596 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1597 {
1598 VEC_pop (btrace_block_s, btrace->blocks);
1599 return 0;
1600 }
1601
1602 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1603 core_addr_to_string_nz (first_new_block->end));
1604
1605 /* Do a simple sanity check to make sure we don't accidentally end up
1606 with a bad block. This should not occur in practice. */
1607 if (first_new_block->end < last_insn->pc)
1608 {
1609 warning (_("Error while trying to read delta trace. Falling back to "
1610 "a full read."));
1611 return -1;
1612 }
1613
1614 /* We adjust the last block to start at the end of our current trace. */
1615 gdb_assert (first_new_block->begin == 0);
1616 first_new_block->begin = last_insn->pc;
1617
1618 /* We simply pop the last insn so we can insert it again as part of
1619 the normal branch trace computation.
1620 Since instruction iterators are based on indices in the instructions
1621 vector, we don't leave any pointers dangling. */
1622 DEBUG ("pruning insn at %s for stitching",
1623 ftrace_print_insn_addr (last_insn));
1624
1625 VEC_pop (btrace_insn_s, last_bfun->insn);
1626
1627 /* The instructions vector may become empty temporarily if this has
1628 been the only instruction in this function segment.
1629 This violates the invariant but will be remedied shortly by
1630 btrace_compute_ftrace when we add the new trace. */
1631
1632 /* The only case where this would hurt is if the entire trace consisted
1633 of just that one instruction. If we remove it, we might turn the now
1634 empty btrace function segment into a gap. But we don't want gaps at
1635 the beginning. To avoid this, we remove the entire old trace. */
1636 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1637 btrace_clear (tp);
1638
1639 return 0;
1640 }
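/* A stitching example (illustrative, hypothetical addresses): if the old
   trace ends with an instruction at pc 0x400510 and the delta trace's
   chronologically first block arrives as { begin = 0, end = 0x400530 }, we
   pop that last instruction and turn the block into
   { begin = 0x400510, end = 0x400530 }, so recomputing the function trace
   re-adds the instruction seamlessly.  */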
1641
1642 /* Adjust the block trace in order to stitch old and new trace together.
1643 BTRACE is the new delta trace between the last and the current stop.
1644 TP is the traced thread.
1645 May modify BTRACE as well as the existing trace in TP.
1646 Return 0 on success, -1 otherwise. */
1647
1648 static int
1649 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1650 {
1651 /* If we don't have trace, there's nothing to do. */
1652 if (btrace_data_empty (btrace))
1653 return 0;
1654
1655 switch (btrace->format)
1656 {
1657 case BTRACE_FORMAT_NONE:
1658 return 0;
1659
1660 case BTRACE_FORMAT_BTS:
1661 return btrace_stitch_bts (&btrace->variant.bts, tp);
1662
1663 case BTRACE_FORMAT_PT:
1664 /* Delta reads are not supported. */
1665 return -1;
1666 }
1667
1668 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1669 }
1670
1671 /* Clear the branch trace histories in BTINFO. */
1672
1673 static void
1674 btrace_clear_history (struct btrace_thread_info *btinfo)
1675 {
1676 xfree (btinfo->insn_history);
1677 xfree (btinfo->call_history);
1678 xfree (btinfo->replay);
1679
1680 btinfo->insn_history = NULL;
1681 btinfo->call_history = NULL;
1682 btinfo->replay = NULL;
1683 }
1684
1685 /* Clear the branch trace maintenance histories in BTINFO. */
1686
1687 static void
1688 btrace_maint_clear (struct btrace_thread_info *btinfo)
1689 {
1690 switch (btinfo->data.format)
1691 {
1692 default:
1693 break;
1694
1695 case BTRACE_FORMAT_BTS:
1696 btinfo->maint.variant.bts.packet_history.begin = 0;
1697 btinfo->maint.variant.bts.packet_history.end = 0;
1698 break;
1699
1700 #if defined (HAVE_LIBIPT)
1701 case BTRACE_FORMAT_PT:
1702 xfree (btinfo->maint.variant.pt.packets);
1703
1704 btinfo->maint.variant.pt.packets = NULL;
1705 btinfo->maint.variant.pt.packet_history.begin = 0;
1706 btinfo->maint.variant.pt.packet_history.end = 0;
1707 break;
1708 #endif /* defined (HAVE_LIBIPT) */
1709 }
1710 }
1711
1712 /* See btrace.h. */
1713
1714 void
1715 btrace_fetch (struct thread_info *tp)
1716 {
1717 struct btrace_thread_info *btinfo;
1718 struct btrace_target_info *tinfo;
1719 struct btrace_data btrace;
1720 struct cleanup *cleanup;
1721 int errcode;
1722
1723 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1724 target_pid_to_str (tp->ptid));
1725
1726 btinfo = &tp->btrace;
1727 tinfo = btinfo->target;
1728 if (tinfo == NULL)
1729 return;
1730
1731 /* There's no way we could get new trace while replaying.
1732 On the other hand, delta trace would return a partial record with the
1733 current PC, which is the replay PC, not the last PC, as expected. */
1734 if (btinfo->replay != NULL)
1735 return;
1736
1737 /* We should not be called on running or exited threads. */
1738 gdb_assert (can_access_registers_ptid (tp->ptid));
1739
1740 btrace_data_init (&btrace);
1741 cleanup = make_cleanup_btrace_data (&btrace);
1742
1743 /* Let's first try to extend the trace we already have. */
1744 if (btinfo->end != NULL)
1745 {
1746 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1747 if (errcode == 0)
1748 {
1749 /* Success. Let's try to stitch the traces together. */
1750 errcode = btrace_stitch_trace (&btrace, tp);
1751 }
1752 else
1753 {
1754 /* We failed to read delta trace. Let's try to read new trace. */
1755 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1756
1757 /* If we got any new trace, discard what we have. */
1758 if (errcode == 0 && !btrace_data_empty (&btrace))
1759 btrace_clear (tp);
1760 }
1761
1762 /* If we were not able to read the trace, we start over. */
1763 if (errcode != 0)
1764 {
1765 btrace_clear (tp);
1766 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1767 }
1768 }
1769 else
1770 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1771
1772 /* If we were not able to read the branch trace, signal an error. */
1773 if (errcode != 0)
1774 error (_("Failed to read branch trace."));
1775
1776 /* Compute the trace, provided we have any. */
1777 if (!btrace_data_empty (&btrace))
1778 {
1779 /* Store the raw trace data. The stored data will be cleared in
1780 btrace_clear, so we always append the new trace. */
1781 btrace_data_append (&btinfo->data, &btrace);
1782 btrace_maint_clear (btinfo);
1783
1784 btrace_clear_history (btinfo);
1785 btrace_compute_ftrace (tp, &btrace);
1786 }
1787
1788 do_cleanups (cleanup);
1789 }
1790
1791 /* See btrace.h. */
1792
1793 void
1794 btrace_clear (struct thread_info *tp)
1795 {
1796 struct btrace_thread_info *btinfo;
1797 struct btrace_function *it, *trash;
1798
1799 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1800 target_pid_to_str (tp->ptid));
1801
1802 /* Make sure btrace frames that may hold a pointer into the branch
1803 trace data are destroyed. */
1804 reinit_frame_cache ();
1805
1806 btinfo = &tp->btrace;
1807
1808 it = btinfo->begin;
1809 while (it != NULL)
1810 {
1811 trash = it;
1812 it = it->flow.next;
1813
1814 xfree (trash);
1815 }
1816
1817 btinfo->begin = NULL;
1818 btinfo->end = NULL;
1819 btinfo->ngaps = 0;
1820
1821 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1822 btrace_maint_clear (btinfo);
1823 btrace_data_clear (&btinfo->data);
1824 btrace_clear_history (btinfo);
1825 }
1826
1827 /* See btrace.h. */
1828
1829 void
1830 btrace_free_objfile (struct objfile *objfile)
1831 {
1832 struct thread_info *tp;
1833
1834 DEBUG ("free objfile");
1835
1836 ALL_NON_EXITED_THREADS (tp)
1837 btrace_clear (tp);
1838 }
1839
1840 #if defined (HAVE_LIBEXPAT)
1841
1842 /* Check the btrace document version. */
1843
1844 static void
1845 check_xml_btrace_version (struct gdb_xml_parser *parser,
1846 const struct gdb_xml_element *element,
1847 void *user_data, VEC (gdb_xml_value_s) *attributes)
1848 {
1849 const char *version
1850 = (const char *) xml_find_attribute (attributes, "version")->value;
1851
1852 if (strcmp (version, "1.0") != 0)
1853 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1854 }
1855
1856 /* Parse a btrace "block" xml record. */
1857
1858 static void
1859 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1860 const struct gdb_xml_element *element,
1861 void *user_data, VEC (gdb_xml_value_s) *attributes)
1862 {
1863 struct btrace_data *btrace;
1864 struct btrace_block *block;
1865 ULONGEST *begin, *end;
1866
1867 btrace = (struct btrace_data *) user_data;
1868
1869 switch (btrace->format)
1870 {
1871 case BTRACE_FORMAT_BTS:
1872 break;
1873
1874 case BTRACE_FORMAT_NONE:
1875 btrace->format = BTRACE_FORMAT_BTS;
1876 btrace->variant.bts.blocks = NULL;
1877 break;
1878
1879 default:
1880 gdb_xml_error (parser, _("Btrace format error."));
1881 }
1882
1883 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1884 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1885
1886 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1887 block->begin = *begin;
1888 block->end = *end;
1889 }
1890
1891 /* Parse a "raw" xml record. */
1892
1893 static void
1894 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1895 gdb_byte **pdata, size_t *psize)
1896 {
1897 struct cleanup *cleanup;
1898 gdb_byte *data, *bin;
1899 size_t len, size;
1900
1901 len = strlen (body_text);
1902 if (len % 2 != 0)
1903 gdb_xml_error (parser, _("Bad raw data size."));
1904
1905 size = len / 2;
1906
1907 bin = data = (gdb_byte *) xmalloc (size);
1908 cleanup = make_cleanup (xfree, data);
1909
1910 /* We use hex encoding - see common/rsp-low.h. */
1911 while (len > 0)
1912 {
1913 char hi, lo;
1914
1915 hi = *body_text++;
1916 lo = *body_text++;
1917
1918 if (hi == 0 || lo == 0)
1919 gdb_xml_error (parser, _("Bad hex encoding."));
1920
1921 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1922 len -= 2;
1923 }
1924
1925 discard_cleanups (cleanup);
1926
1927 *pdata = data;
1928 *psize = size;
1929 }
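/* For example (illustrative): the body text "0a4b" decodes to the two bytes
   { 0x0a, 0x4b }, since fromhex ('0') * 16 + fromhex ('a') == 0x0a and
   fromhex ('4') * 16 + fromhex ('b') == 0x4b.  */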
1930
1931 /* Parse a btrace pt-config "cpu" xml record. */
1932
1933 static void
1934 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1935 const struct gdb_xml_element *element,
1936 void *user_data,
1937 VEC (gdb_xml_value_s) *attributes)
1938 {
1939 struct btrace_data *btrace;
1940 const char *vendor;
1941 ULONGEST *family, *model, *stepping;
1942
1943 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1944 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1945 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1946 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
1947
1948 btrace = (struct btrace_data *) user_data;
1949
1950 if (strcmp (vendor, "GenuineIntel") == 0)
1951 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1952
1953 btrace->variant.pt.config.cpu.family = *family;
1954 btrace->variant.pt.config.cpu.model = *model;
1955 btrace->variant.pt.config.cpu.stepping = *stepping;
1956 }
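
/* Illustrative example, not part of GDB: a pt-config "cpu" record
   such as

     <cpu vendor="GenuineIntel" family="6" model="62" stepping="4"/>

   identifies the processor for the trace; vendor strings other than
   "GenuineIntel" leave the vendor at the CV_UNKNOWN default set in
   parse_xml_btrace_pt below.  */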
1957
1958 /* Parse a btrace pt "raw" xml record. */
1959
1960 static void
1961 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1962 const struct gdb_xml_element *element,
1963 void *user_data, const char *body_text)
1964 {
1965 struct btrace_data *btrace;
1966
1967 btrace = (struct btrace_data *) user_data;
1968 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1969 &btrace->variant.pt.size);
1970 }
1971
1972 /* Parse a btrace "pt" xml record. */
1973
1974 static void
1975 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1976 const struct gdb_xml_element *element,
1977 void *user_data, VEC (gdb_xml_value_s) *attributes)
1978 {
1979 struct btrace_data *btrace;
1980
1981 btrace = (struct btrace_data *) user_data;
1982 btrace->format = BTRACE_FORMAT_PT;
1983 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1984 btrace->variant.pt.data = NULL;
1985 btrace->variant.pt.size = 0;
1986 }
1987
1988 static const struct gdb_xml_attribute block_attributes[] = {
1989 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1990 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1991 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1992 };
1993
1994 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1995 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1996 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1997 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1998 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1999 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2000 };
2001
2002 static const struct gdb_xml_element btrace_pt_config_children[] = {
2003 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2004 parse_xml_btrace_pt_config_cpu, NULL },
2005 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2006 };
2007
2008 static const struct gdb_xml_element btrace_pt_children[] = {
2009 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2010 NULL },
2011 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2012 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2013 };
2014
2015 static const struct gdb_xml_attribute btrace_attributes[] = {
2016 { "version", GDB_XML_AF_NONE, NULL, NULL },
2017 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2018 };
2019
2020 static const struct gdb_xml_element btrace_children[] = {
2021 { "block", block_attributes, NULL,
2022 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2023 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2024 NULL },
2025 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2026 };
2027
2028 static const struct gdb_xml_element btrace_elements[] = {
2029 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2030 check_xml_btrace_version, NULL },
2031 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2032 };
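
/* Illustrative example, not part of GDB: a complete BTS document
   accepted by the element tables above might read

     <btrace version="1.0">
       <block begin="0x400000" end="0x400013"/>
       <block begin="0x400020" end="0x400027"/>
     </btrace>

   whereas a PT document carries a single <pt> element with an
   optional <pt-config> child and a hex-encoded <raw> child.  */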
2033
2034 #endif /* defined (HAVE_LIBEXPAT) */
2035
2036 /* See btrace.h. */
2037
2038 void
2039 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2040 {
2041 struct cleanup *cleanup;
2042 int errcode;
2043
2044 #if defined (HAVE_LIBEXPAT)
2045
2046 btrace->format = BTRACE_FORMAT_NONE;
2047
2048 cleanup = make_cleanup_btrace_data (btrace);
2049 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2050 buffer, btrace);
2051 if (errcode != 0)
2052 error (_("Error parsing branch trace."));
2053
2054 /* Keep parse results. */
2055 discard_cleanups (cleanup);
2056
2057 #else /* !defined (HAVE_LIBEXPAT) */
2058
2059 error (_("Cannot process branch trace. XML parsing is not supported."));
2060
2061 #endif /* !defined (HAVE_LIBEXPAT) */
2062 }
2063
2064 #if defined (HAVE_LIBEXPAT)
2065
2066 /* Parse a btrace-conf "bts" xml record. */
2067
2068 static void
2069 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2070 const struct gdb_xml_element *element,
2071 void *user_data, VEC (gdb_xml_value_s) *attributes)
2072 {
2073 struct btrace_config *conf;
2074 struct gdb_xml_value *size;
2075
2076 conf = (struct btrace_config *) user_data;
2077 conf->format = BTRACE_FORMAT_BTS;
2078 conf->bts.size = 0;
2079
2080 size = xml_find_attribute (attributes, "size");
2081 if (size != NULL)
2082 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2083 }
2084
2085 /* Parse a btrace-conf "pt" xml record. */
2086
2087 static void
2088 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2089 const struct gdb_xml_element *element,
2090 void *user_data, VEC (gdb_xml_value_s) *attributes)
2091 {
2092 struct btrace_config *conf;
2093 struct gdb_xml_value *size;
2094
2095 conf = (struct btrace_config *) user_data;
2096 conf->format = BTRACE_FORMAT_PT;
2097 conf->pt.size = 0;
2098
2099 size = xml_find_attribute (attributes, "size");
2100 if (size != NULL)
2101 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2102 }
2103
2104 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2105 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2106 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2107 };
2108
2109 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2110 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2111 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2112 };
2113
2114 static const struct gdb_xml_element btrace_conf_children[] = {
2115 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2116 parse_xml_btrace_conf_bts, NULL },
2117 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2118 parse_xml_btrace_conf_pt, NULL },
2119 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2120 };
2121
2122 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2123 { "version", GDB_XML_AF_NONE, NULL, NULL },
2124 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2125 };
2126
2127 static const struct gdb_xml_element btrace_conf_elements[] = {
2128 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2129 GDB_XML_EF_NONE, NULL, NULL },
2130 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2131 };
2132
2133 #endif /* defined (HAVE_LIBEXPAT) */
2134
2135 /* See btrace.h. */
2136
2137 void
2138 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2139 {
2140 int errcode;
2141
2142 #if defined (HAVE_LIBEXPAT)
2143
2144 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2145 btrace_conf_elements, xml, conf);
2146 if (errcode != 0)
2147 error (_("Error parsing branch trace configuration."));
2148
2149 #else /* !defined (HAVE_LIBEXPAT) */
2150
2151 error (_("XML parsing is not supported."));
2152
2153 #endif /* !defined (HAVE_LIBEXPAT) */
2154 }
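
/* Illustrative example, not part of GDB: a configuration document
   accepted by the tables above might read

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   which selects the BTS format with a 64 KiB buffer.  */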
2155
2156 /* See btrace.h. */
2157
2158 const struct btrace_insn *
2159 btrace_insn_get (const struct btrace_insn_iterator *it)
2160 {
2161 const struct btrace_function *bfun;
2162 unsigned int index, end;
2163
2164 index = it->index;
2165 bfun = it->function;
2166
2167 /* Check if the iterator points to a gap in the trace. */
2168 if (bfun->errcode != 0)
2169 return NULL;
2170
2171 /* The index is within the bounds of this function's instruction vector. */
2172 end = VEC_length (btrace_insn_s, bfun->insn);
2173 gdb_assert (0 < end);
2174 gdb_assert (index < end);
2175
2176 return VEC_index (btrace_insn_s, bfun->insn, index);
2177 }
2178
2179 /* See btrace.h. */
2180
2181 unsigned int
2182 btrace_insn_number (const struct btrace_insn_iterator *it)
2183 {
2184 const struct btrace_function *bfun;
2185
2186 bfun = it->function;
2187
2188 /* Return zero if the iterator points to a gap in the trace. */
2189 if (bfun->errcode != 0)
2190 return 0;
2191
2192 return bfun->insn_offset + it->index;
2193 }
2194
2195 /* See btrace.h. */
2196
2197 void
2198 btrace_insn_begin (struct btrace_insn_iterator *it,
2199 const struct btrace_thread_info *btinfo)
2200 {
2201 const struct btrace_function *bfun;
2202
2203 bfun = btinfo->begin;
2204 if (bfun == NULL)
2205 error (_("No trace."));
2206
2207 it->function = bfun;
2208 it->index = 0;
2209 }
2210
2211 /* See btrace.h. */
2212
2213 void
2214 btrace_insn_end (struct btrace_insn_iterator *it,
2215 const struct btrace_thread_info *btinfo)
2216 {
2217 const struct btrace_function *bfun;
2218 unsigned int length;
2219
2220 bfun = btinfo->end;
2221 if (bfun == NULL)
2222 error (_("No trace."));
2223
2224 length = VEC_length (btrace_insn_s, bfun->insn);
2225
2226 /* The last function may either be a gap or contain the current
2227 instruction, which is one past the end of the execution trace;
2228 ignore it. */
2229 if (length > 0)
2230 length -= 1;
2231
2232 it->function = bfun;
2233 it->index = length;
2234 }
2235
2236 /* See btrace.h. */
2237
2238 unsigned int
2239 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2240 {
2241 const struct btrace_function *bfun;
2242 unsigned int index, steps;
2243
2244 bfun = it->function;
2245 steps = 0;
2246 index = it->index;
2247
2248 while (stride != 0)
2249 {
2250 unsigned int end, space, adv;
2251
2252 end = VEC_length (btrace_insn_s, bfun->insn);
2253
2254 /* An empty function segment represents a gap in the trace. We count
2255 it as one instruction. */
2256 if (end == 0)
2257 {
2258 const struct btrace_function *next;
2259
2260 next = bfun->flow.next;
2261 if (next == NULL)
2262 break;
2263
2264 stride -= 1;
2265 steps += 1;
2266
2267 bfun = next;
2268 index = 0;
2269
2270 continue;
2271 }
2272
2273 gdb_assert (0 < end);
2274 gdb_assert (index < end);
2275
2276 /* Compute the number of instructions remaining in this segment. */
2277 space = end - index;
2278
2279 /* Advance the iterator as far as possible within this segment. */
2280 adv = std::min (space, stride);
2281 stride -= adv;
2282 index += adv;
2283 steps += adv;
2284
2285 /* Move to the next function if we're at the end of this one. */
2286 if (index == end)
2287 {
2288 const struct btrace_function *next;
2289
2290 next = bfun->flow.next;
2291 if (next == NULL)
2292 {
2293 /* We stepped past the last function.
2294
2295 Let's adjust the index to point to the last instruction in
2296 the previous function. */
2297 index -= 1;
2298 steps -= 1;
2299 break;
2300 }
2301
2302 /* We now point to the first instruction in the new function. */
2303 bfun = next;
2304 index = 0;
2305 }
2306
2307 /* We did make progress. */
2308 gdb_assert (adv > 0);
2309 }
2310
2311 /* Update the iterator. */
2312 it->function = bfun;
2313 it->index = index;
2314
2315 return steps;
2316 }
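
/* Illustrative sketch, not part of GDB: a typical forward walk over
   the instruction history of a thread TP, assuming its trace has
   already been fetched.  PROCESS is a hypothetical stand-in for
   whatever the caller does with each instruction; a NULL instruction
   marks a gap in the trace.

     struct btrace_insn_iterator it;
     const struct btrace_insn *insn;

     btrace_insn_begin (&it, &tp->btrace);
     do
       {
         insn = btrace_insn_get (&it);
         if (insn != NULL)
           process (insn->pc);
       }
     while (btrace_insn_next (&it, 1) > 0);  */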
2317
2318 /* See btrace.h. */
2319
2320 unsigned int
2321 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2322 {
2323 const struct btrace_function *bfun;
2324 unsigned int index, steps;
2325
2326 bfun = it->function;
2327 steps = 0;
2328 index = it->index;
2329
2330 while (stride != 0)
2331 {
2332 unsigned int adv;
2333
2334 /* Move to the previous function if we're at the start of this one. */
2335 if (index == 0)
2336 {
2337 const struct btrace_function *prev;
2338
2339 prev = bfun->flow.prev;
2340 if (prev == NULL)
2341 break;
2342
2343 /* We point to one after the last instruction in the new function. */
2344 bfun = prev;
2345 index = VEC_length (btrace_insn_s, bfun->insn);
2346
2347 /* An empty function segment represents a gap in the trace. We count
2348 it as one instruction. */
2349 if (index == 0)
2350 {
2351 stride -= 1;
2352 steps += 1;
2353
2354 continue;
2355 }
2356 }
2357
2358 /* Advance the iterator as far as possible within this segment. */
2359 adv = std::min (index, stride);
2360
2361 stride -= adv;
2362 index -= adv;
2363 steps += adv;
2364
2365 /* We did make progress. */
2366 gdb_assert (adv > 0);
2367 }
2368
2369 /* Update the iterator. */
2370 it->function = bfun;
2371 it->index = index;
2372
2373 return steps;
2374 }
2375
2376 /* See btrace.h. */
2377
2378 int
2379 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2380 const struct btrace_insn_iterator *rhs)
2381 {
2382 unsigned int lnum, rnum;
2383
2384 lnum = btrace_insn_number (lhs);
2385 rnum = btrace_insn_number (rhs);
2386
2387 /* A gap has an instruction number of zero. Things get more
2388 complicated when gaps are involved.
2389
2390 We take the instruction number offset from the iterator's function.
2391 This is the number of the first instruction after the gap.
2392
2393 This is OK as long as both lhs and rhs point to gaps. If only one of
2394 them does, we need to adjust the number based on the other's regular
2395 instruction number. Otherwise, a gap might compare equal to an
2396 instruction. */
2397
2398 if (lnum == 0 && rnum == 0)
2399 {
2400 lnum = lhs->function->insn_offset;
2401 rnum = rhs->function->insn_offset;
2402 }
2403 else if (lnum == 0)
2404 {
2405 lnum = lhs->function->insn_offset;
2406
2407 if (lnum == rnum)
2408 lnum -= 1;
2409 }
2410 else if (rnum == 0)
2411 {
2412 rnum = rhs->function->insn_offset;
2413
2414 if (rnum == lnum)
2415 rnum -= 1;
2416 }
2417
2418 return (int) (lnum - rnum);
2419 }
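
/* Illustrative worked example, not part of GDB: assume LHS points to
   a gap whose function has insn_offset 23 and RHS to the regular
   instruction numbered 23 right after that gap.  Taking the gap's
   number from its function's insn_offset gives 23 for both; the
   adjustment above lowers LNUM to 22 so the gap orders before the
   instruction, as intended.  */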
2420
2421 /* See btrace.h. */
2422
2423 int
2424 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2425 const struct btrace_thread_info *btinfo,
2426 unsigned int number)
2427 {
2428 const struct btrace_function *bfun;
2429 unsigned int end, length;
2430
2431 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2432 {
2433 /* Skip gaps. */
2434 if (bfun->errcode != 0)
2435 continue;
2436
2437 if (bfun->insn_offset <= number)
2438 break;
2439 }
2440
2441 if (bfun == NULL)
2442 return 0;
2443
2444 length = VEC_length (btrace_insn_s, bfun->insn);
2445 gdb_assert (length > 0);
2446
2447 end = bfun->insn_offset + length;
2448 if (end <= number)
2449 return 0;
2450
2451 it->function = bfun;
2452 it->index = number - bfun->insn_offset;
2453
2454 return 1;
2455 }
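
/* Illustrative sketch, not part of GDB: positioning an iterator on a
   known instruction number NUMBER, assuming trace has been fetched.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, &tp->btrace, number))
       insn = btrace_insn_get (&it);

   A zero return means NUMBER lies outside the trace.  */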
2456
2457 /* See btrace.h. */
2458
2459 const struct btrace_function *
2460 btrace_call_get (const struct btrace_call_iterator *it)
2461 {
2462 return it->function;
2463 }
2464
2465 /* See btrace.h. */
2466
2467 unsigned int
2468 btrace_call_number (const struct btrace_call_iterator *it)
2469 {
2470 const struct btrace_thread_info *btinfo;
2471 const struct btrace_function *bfun;
2472 unsigned int insns;
2473
2474 btinfo = it->btinfo;
2475 bfun = it->function;
2476 if (bfun != NULL)
2477 return bfun->number;
2478
2479 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2480 number of the last function. */
2481 bfun = btinfo->end;
2482 insns = VEC_length (btrace_insn_s, bfun->insn);
2483
2484 /* If the function contains only a single instruction (i.e. the current
2485 instruction), it will be skipped and its number is already the number
2486 we seek. */
2487 if (insns == 1)
2488 return bfun->number;
2489
2490 /* Otherwise, return one more than the number of the last function. */
2491 return bfun->number + 1;
2492 }
2493
2494 /* See btrace.h. */
2495
2496 void
2497 btrace_call_begin (struct btrace_call_iterator *it,
2498 const struct btrace_thread_info *btinfo)
2499 {
2500 const struct btrace_function *bfun;
2501
2502 bfun = btinfo->begin;
2503 if (bfun == NULL)
2504 error (_("No trace."));
2505
2506 it->btinfo = btinfo;
2507 it->function = bfun;
2508 }
2509
2510 /* See btrace.h. */
2511
2512 void
2513 btrace_call_end (struct btrace_call_iterator *it,
2514 const struct btrace_thread_info *btinfo)
2515 {
2516 const struct btrace_function *bfun;
2517
2518 bfun = btinfo->end;
2519 if (bfun == NULL)
2520 error (_("No trace."));
2521
2522 it->btinfo = btinfo;
2523 it->function = NULL;
2524 }
2525
2526 /* See btrace.h. */
2527
2528 unsigned int
2529 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2530 {
2531 const struct btrace_function *bfun;
2532 unsigned int steps;
2533
2534 bfun = it->function;
2535 steps = 0;
2536 while (bfun != NULL)
2537 {
2538 const struct btrace_function *next;
2539 unsigned int insns;
2540
2541 next = bfun->flow.next;
2542 if (next == NULL)
2543 {
2544 /* Ignore the last function if it only contains a single
2545 (i.e. the current) instruction. */
2546 insns = VEC_length (btrace_insn_s, bfun->insn);
2547 if (insns == 1)
2548 steps -= 1;
2549 }
2550
2551 if (stride == steps)
2552 break;
2553
2554 bfun = next;
2555 steps += 1;
2556 }
2557
2558 it->function = bfun;
2559 return steps;
2560 }
2561
2562 /* See btrace.h. */
2563
2564 unsigned int
2565 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2566 {
2567 const struct btrace_thread_info *btinfo;
2568 const struct btrace_function *bfun;
2569 unsigned int steps;
2570
2571 bfun = it->function;
2572 steps = 0;
2573
2574 if (bfun == NULL)
2575 {
2576 unsigned int insns;
2577
2578 btinfo = it->btinfo;
2579 bfun = btinfo->end;
2580 if (bfun == NULL)
2581 return 0;
2582
2583 /* Ignore the last function if it only contains a single
2584 (i.e. the current) instruction. */
2585 insns = VEC_length (btrace_insn_s, bfun->insn);
2586 if (insns == 1)
2587 bfun = bfun->flow.prev;
2588
2589 if (bfun == NULL)
2590 return 0;
2591
2592 steps += 1;
2593 }
2594
2595 while (steps < stride)
2596 {
2597 const struct btrace_function *prev;
2598
2599 prev = bfun->flow.prev;
2600 if (prev == NULL)
2601 break;
2602
2603 bfun = prev;
2604 steps += 1;
2605 }
2606
2607 it->function = bfun;
2608 return steps;
2609 }
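
/* Illustrative sketch, not part of GDB: walking the call history from
   the most recent call segment backwards, assuming trace has been
   fetched.  PROCESS is a hypothetical stand-in.

     struct btrace_call_iterator it;
     const struct btrace_function *bfun;

     btrace_call_end (&it, &tp->btrace);
     while (btrace_call_prev (&it, 1) > 0)
       {
         bfun = btrace_call_get (&it);
         process (bfun->number);
       }  */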
2610
2611 /* See btrace.h. */
2612
2613 int
2614 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2615 const struct btrace_call_iterator *rhs)
2616 {
2617 unsigned int lnum, rnum;
2618
2619 lnum = btrace_call_number (lhs);
2620 rnum = btrace_call_number (rhs);
2621
2622 return (int) (lnum - rnum);
2623 }
2624
2625 /* See btrace.h. */
2626
2627 int
2628 btrace_find_call_by_number (struct btrace_call_iterator *it,
2629 const struct btrace_thread_info *btinfo,
2630 unsigned int number)
2631 {
2632 const struct btrace_function *bfun;
2633
2634 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2635 {
2636 unsigned int bnum;
2637
2638 bnum = bfun->number;
2639 if (number == bnum)
2640 {
2641 it->btinfo = btinfo;
2642 it->function = bfun;
2643 return 1;
2644 }
2645
2646 /* Functions are ordered and numbered consecutively, so we could
2647 bail out early once BFUN->NUMBER drops below NUMBER. On the other
2648 hand, it is very unlikely that we search for a nonexistent
function. */
2649 }
2650
2651 return 0;
2652 }
2653
2654 /* See btrace.h. */
2655
2656 void
2657 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2658 const struct btrace_insn_iterator *begin,
2659 const struct btrace_insn_iterator *end)
2660 {
2661 if (btinfo->insn_history == NULL)
2662 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2663
2664 btinfo->insn_history->begin = *begin;
2665 btinfo->insn_history->end = *end;
2666 }
2667
2668 /* See btrace.h. */
2669
2670 void
2671 btrace_set_call_history (struct btrace_thread_info *btinfo,
2672 const struct btrace_call_iterator *begin,
2673 const struct btrace_call_iterator *end)
2674 {
2675 gdb_assert (begin->btinfo == end->btinfo);
2676
2677 if (btinfo->call_history == NULL)
2678 btinfo->call_history = XCNEW (struct btrace_call_history);
2679
2680 btinfo->call_history->begin = *begin;
2681 btinfo->call_history->end = *end;
2682 }
2683
2684 /* See btrace.h. */
2685
2686 int
2687 btrace_is_replaying (struct thread_info *tp)
2688 {
2689 return tp->btrace.replay != NULL;
2690 }
2691
2692 /* See btrace.h. */
2693
2694 int
2695 btrace_is_empty (struct thread_info *tp)
2696 {
2697 struct btrace_insn_iterator begin, end;
2698 struct btrace_thread_info *btinfo;
2699
2700 btinfo = &tp->btrace;
2701
2702 if (btinfo->begin == NULL)
2703 return 1;
2704
2705 btrace_insn_begin (&begin, btinfo);
2706 btrace_insn_end (&end, btinfo);
2707
2708 return btrace_insn_cmp (&begin, &end) == 0;
2709 }
2710
2711 /* Forward the cleanup request. */
2712
2713 static void
2714 do_btrace_data_cleanup (void *arg)
2715 {
2716 btrace_data_fini ((struct btrace_data *) arg);
2717 }
2718
2719 /* See btrace.h. */
2720
2721 struct cleanup *
2722 make_cleanup_btrace_data (struct btrace_data *data)
2723 {
2724 return make_cleanup (do_btrace_data_cleanup, data);
2725 }
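
/* Illustrative sketch, not part of GDB: the intended usage pattern,
   as in parse_xml_btrace above.  WORK_THAT_MAY_THROW is a
   hypothetical stand-in for parsing or decoding work.

     struct cleanup *cleanup = make_cleanup_btrace_data (&btrace);

     work_that_may_throw ();
     discard_cleanups (cleanup);

   If an exception escapes before discard_cleanups, the cleanup chain
   calls btrace_data_fini on the data.  */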
2726
2727 #if defined (HAVE_LIBIPT)
2728
2729 /* Print a single packet. */
2730
2731 static void
2732 pt_print_packet (const struct pt_packet *packet)
2733 {
2734 switch (packet->type)
2735 {
2736 default:
2737 printf_unfiltered (("[??: %x]"), packet->type);
2738 break;
2739
2740 case ppt_psb:
2741 printf_unfiltered (("psb"));
2742 break;
2743
2744 case ppt_psbend:
2745 printf_unfiltered (("psbend"));
2746 break;
2747
2748 case ppt_pad:
2749 printf_unfiltered (("pad"));
2750 break;
2751
2752 case ppt_tip:
2753 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2754 packet->payload.ip.ipc,
2755 packet->payload.ip.ip);
2756 break;
2757
2758 case ppt_tip_pge:
2759 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2760 packet->payload.ip.ipc,
2761 packet->payload.ip.ip);
2762 break;
2763
2764 case ppt_tip_pgd:
2765 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2766 packet->payload.ip.ipc,
2767 packet->payload.ip.ip);
2768 break;
2769
2770 case ppt_fup:
2771 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2772 packet->payload.ip.ipc,
2773 packet->payload.ip.ip);
2774 break;
2775
2776 case ppt_tnt_8:
2777 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2778 packet->payload.tnt.bit_size,
2779 packet->payload.tnt.payload);
2780 break;
2781
2782 case ppt_tnt_64:
2783 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2784 packet->payload.tnt.bit_size,
2785 packet->payload.tnt.payload);
2786 break;
2787
2788 case ppt_pip:
2789 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2790 packet->payload.pip.nr ? (" nr") : (""));
2791 break;
2792
2793 case ppt_tsc:
2794 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2795 break;
2796
2797 case ppt_cbr:
2798 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2799 break;
2800
2801 case ppt_mode:
2802 switch (packet->payload.mode.leaf)
2803 {
2804 default:
2805 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2806 break;
2807
2808 case pt_mol_exec:
2809 printf_unfiltered (("mode.exec%s%s"),
2810 packet->payload.mode.bits.exec.csl
2811 ? (" cs.l") : (""),
2812 packet->payload.mode.bits.exec.csd
2813 ? (" cs.d") : (""));
2814 break;
2815
2816 case pt_mol_tsx:
2817 printf_unfiltered (("mode.tsx%s%s"),
2818 packet->payload.mode.bits.tsx.intx
2819 ? (" intx") : (""),
2820 packet->payload.mode.bits.tsx.abrt
2821 ? (" abrt") : (""));
2822 break;
2823 }
2824 break;
2825
2826 case ppt_ovf:
2827 printf_unfiltered (("ovf"));
2828 break;
2829
2830 case ppt_stop:
2831 printf_unfiltered (("stop"));
2832 break;
2833
2834 case ppt_vmcs:
2835 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2836 break;
2837
2838 case ppt_tma:
2839 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2840 packet->payload.tma.fc);
2841 break;
2842
2843 case ppt_mtc:
2844 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2845 break;
2846
2847 case ppt_cyc:
2848 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2849 break;
2850
2851 case ppt_mnt:
2852 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2853 break;
2854 }
2855 }
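
/* Illustrative example, not part of GDB: with the format strings
   above, the packet history contains lines such as

     psb
     tip 3: 0x401000
     tnt-8 6: 0x2a
     psbend

   one line per packet; the caller below prefixes each line with the
   packet's index and trace offset.  */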
2856
2857 /* Decode packets into MAINT using DECODER. */
2858
2859 static void
2860 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2861 struct pt_packet_decoder *decoder)
2862 {
2863 int errcode;
2864
2865 for (;;)
2866 {
2867 struct btrace_pt_packet packet;
2868
2869 errcode = pt_pkt_sync_forward (decoder);
2870 if (errcode < 0)
2871 break;
2872
2873 for (;;)
2874 {
2875 pt_pkt_get_offset (decoder, &packet.offset);
2876
2877 errcode = pt_pkt_next (decoder, &packet.packet,
2878 sizeof (packet.packet));
2879 if (errcode < 0)
2880 break;
2881
2882 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2883 {
2884 packet.errcode = pt_errcode (errcode);
2885 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2886 &packet);
2887 }
2888 }
2889
2890 if (errcode == -pte_eos)
2891 break;
2892
2893 packet.errcode = pt_errcode (errcode);
2894 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2895 &packet);
2896
2897 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2898 packet.offset, pt_errstr (packet.errcode));
2899 }
2900
2901 if (errcode != -pte_eos)
2902 warning (_("Failed to synchronize onto the Intel Processor Trace "
2903 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2904 }
2905
2906 /* Update the packet history in BTINFO. */
2907
2908 static void
2909 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2910 {
2912 struct pt_packet_decoder *decoder;
2913 struct btrace_data_pt *pt;
2914 struct pt_config config;
2915 int errcode;
2916
2917 pt = &btinfo->data.variant.pt;
2918
2919 /* Nothing to do if there is no trace. */
2920 if (pt->size == 0)
2921 return;
2922
2923 memset (&config, 0, sizeof (config));
2924
2925 config.size = sizeof (config);
2926 config.begin = pt->data;
2927 config.end = pt->data + pt->size;
2928
2929 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2930 config.cpu.family = pt->config.cpu.family;
2931 config.cpu.model = pt->config.cpu.model;
2932 config.cpu.stepping = pt->config.cpu.stepping;
2933
2934 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2935 if (errcode < 0)
2936 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2937 pt_errstr (pt_errcode (errcode)));
2938
2939 decoder = pt_pkt_alloc_decoder (&config);
2940 if (decoder == NULL)
2941 error (_("Failed to allocate the Intel Processor Trace decoder."));
2942
2943 TRY
2944 {
2945 btrace_maint_decode_pt (&btinfo->maint, decoder);
2946 }
2947 CATCH (except, RETURN_MASK_ALL)
2948 {
2949 pt_pkt_free_decoder (decoder);
2950
2951 if (except.reason < 0)
2952 throw_exception (except);
2953 }
2954 END_CATCH
2955
2956 pt_pkt_free_decoder (decoder);
2957 }
2958
2959 #endif /* defined (HAVE_LIBIPT) */
2960
2961 /* Update the packet maintenance information for BTINFO and store the
2962 low and high bounds into BEGIN and END, respectively.
2963 Store the current iterator state into FROM and TO. */
2964
2965 static void
2966 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2967 unsigned int *begin, unsigned int *end,
2968 unsigned int *from, unsigned int *to)
2969 {
2970 switch (btinfo->data.format)
2971 {
2972 default:
2973 *begin = 0;
2974 *end = 0;
2975 *from = 0;
2976 *to = 0;
2977 break;
2978
2979 case BTRACE_FORMAT_BTS:
2980 /* Nothing to do - we operate directly on BTINFO->DATA. */
2981 *begin = 0;
2982 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2983 *from = btinfo->maint.variant.bts.packet_history.begin;
2984 *to = btinfo->maint.variant.bts.packet_history.end;
2985 break;
2986
2987 #if defined (HAVE_LIBIPT)
2988 case BTRACE_FORMAT_PT:
2989 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2990 btrace_maint_update_pt_packets (btinfo);
2991
2992 *begin = 0;
2993 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2994 *from = btinfo->maint.variant.pt.packet_history.begin;
2995 *to = btinfo->maint.variant.pt.packet_history.end;
2996 break;
2997 #endif /* defined (HAVE_LIBIPT) */
2998 }
2999 }
3000
3001 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3002 update the current iterator position. */
3003
3004 static void
3005 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3006 unsigned int begin, unsigned int end)
3007 {
3008 switch (btinfo->data.format)
3009 {
3010 default:
3011 break;
3012
3013 case BTRACE_FORMAT_BTS:
3014 {
3015 VEC (btrace_block_s) *blocks;
3016 unsigned int blk;
3017
3018 blocks = btinfo->data.variant.bts.blocks;
3019 for (blk = begin; blk < end; ++blk)
3020 {
3021 const btrace_block_s *block;
3022
3023 block = VEC_index (btrace_block_s, blocks, blk);
3024
3025 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3026 core_addr_to_string_nz (block->begin),
3027 core_addr_to_string_nz (block->end));
3028 }
3029
3030 btinfo->maint.variant.bts.packet_history.begin = begin;
3031 btinfo->maint.variant.bts.packet_history.end = end;
3032 }
3033 break;
3034
3035 #if defined (HAVE_LIBIPT)
3036 case BTRACE_FORMAT_PT:
3037 {
3038 VEC (btrace_pt_packet_s) *packets;
3039 unsigned int pkt;
3040
3041 packets = btinfo->maint.variant.pt.packets;
3042 for (pkt = begin; pkt < end; ++pkt)
3043 {
3044 const struct btrace_pt_packet *packet;
3045
3046 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3047
3048 printf_unfiltered ("%u\t", pkt);
3049 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3050
3051 if (packet->errcode == pte_ok)
3052 pt_print_packet (&packet->packet);
3053 else
3054 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3055
3056 printf_unfiltered ("\n");
3057 }
3058
3059 btinfo->maint.variant.pt.packet_history.begin = begin;
3060 btinfo->maint.variant.pt.packet_history.end = end;
3061 }
3062 break;
3063 #endif /* defined (HAVE_LIBIPT) */
3064 }
3065 }
3066
3067 /* Read a number from an argument string. */
3068
3069 static unsigned int
3070 get_uint (char **arg)
3071 {
3072 char *begin, *end, *pos;
3073 unsigned long number;
3074
3075 begin = *arg;
3076 pos = skip_spaces (begin);
3077
3078 if (!isdigit (*pos))
3079 error (_("Expected positive number, got: %s."), pos);
3080
3081 number = strtoul (pos, &end, 10);
3082 if (number > UINT_MAX)
3083 error (_("Number too big."));
3084
3085 *arg += (end - begin);
3086
3087 return (unsigned int) number;
3088 }
3089
3090 /* Read a context size from an argument string. */
3091
3092 static int
3093 get_context_size (char **arg)
3094 {
3095 char *pos;
3096 int number;
3097
3098 pos = skip_spaces (*arg);
3099
3100 if (!isdigit (*pos))
3101 error (_("Expected positive number, got: %s."), pos);
3102
3103 return strtol (pos, arg, 10);
3104 }
3105
3106 /* Complain about junk at the end of an argument string. */
3107
3108 static void
3109 no_chunk (char *arg)
3110 {
3111 if (*arg != 0)
3112 error (_("Junk after argument: %s."), arg);
3113 }
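
/* Illustrative examples, not part of GDB: the helpers above parse the
   argument forms handled by "maint btrace packet-history" below.
   "42" and "42,+10" both print the ten packets starting at packet 42,
   "42,-10" prints the ten packets ending at packet 42, "42,50" prints
   packets 42 through 50, and a bare "+" or "-" continues forward or
   backward from the previous print.  */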
3114
3115 /* The "maintenance btrace packet-history" command. */
3116
3117 static void
3118 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3119 {
3120 struct btrace_thread_info *btinfo;
3121 struct thread_info *tp;
3122 unsigned int size, begin, end, from, to;
3123
3124 tp = find_thread_ptid (inferior_ptid);
3125 if (tp == NULL)
3126 error (_("No thread."));
3127
3128 size = 10;
3129 btinfo = &tp->btrace;
3130
3131 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3132 if (begin == end)
3133 {
3134 printf_unfiltered (_("No trace.\n"));
3135 return;
3136 }
3137
3138 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3139 {
3140 from = to;
3141
3142 if (end - from < size)
3143 size = end - from;
3144 to = from + size;
3145 }
3146 else if (strcmp (arg, "-") == 0)
3147 {
3148 to = from;
3149
3150 if (to - begin < size)
3151 size = to - begin;
3152 from = to - size;
3153 }
3154 else
3155 {
3156 from = get_uint (&arg);
3157 if (end <= from)
3158 error (_("'%u' is out of range."), from);
3159
3160 arg = skip_spaces (arg);
3161 if (*arg == ',')
3162 {
3163 arg = skip_spaces (++arg);
3164
3165 if (*arg == '+')
3166 {
3167 arg += 1;
3168 size = get_context_size (&arg);
3169
3170 no_chunk (arg);
3171
3172 if (end - from < size)
3173 size = end - from;
3174 to = from + size;
3175 }
3176 else if (*arg == '-')
3177 {
3178 arg += 1;
3179 size = get_context_size (&arg);
3180
3181 no_chunk (arg);
3182
3183 /* Include the packet given as first argument. */
3184 from += 1;
3185 to = from;
3186
3187 if (to - begin < size)
3188 size = to - begin;
3189 from = to - size;
3190 }
3191 else
3192 {
3193 to = get_uint (&arg);
3194
3195 /* Include the packet at the second argument and silently
3196 truncate the range. */
3197 if (to < end)
3198 to += 1;
3199 else
3200 to = end;
3201
3202 no_chunk (arg);
3203 }
3204 }
3205 else
3206 {
3207 no_chunk (arg);
3208
3209 if (end - from < size)
3210 size = end - from;
3211 to = from + size;
3212 }
3213
3214 dont_repeat ();
3215 }
3216
3217 btrace_maint_print_packets (btinfo, from, to);
3218 }
3219
3220 /* The "maintenance btrace clear-packet-history" command. */
3221
3222 static void
3223 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3224 {
3225 struct btrace_thread_info *btinfo;
3226 struct thread_info *tp;
3227
3228 if (args != NULL && *args != 0)
3229 error (_("Invalid argument."));
3230
3231 tp = find_thread_ptid (inferior_ptid);
3232 if (tp == NULL)
3233 error (_("No thread."));
3234
3235 btinfo = &tp->btrace;
3236
3237 /* Clear the maint data first - it depends on BTINFO->DATA. */
3238 btrace_maint_clear (btinfo);
3239 btrace_data_clear (&btinfo->data);
3240 }
3241
3242 /* The "maintenance btrace clear" command. */
3243
3244 static void
3245 maint_btrace_clear_cmd (char *args, int from_tty)
3246 {
3247 struct btrace_thread_info *btinfo;
3248 struct thread_info *tp;
3249
3250 if (args != NULL && *args != 0)
3251 error (_("Invalid argument."));
3252
3253 tp = find_thread_ptid (inferior_ptid);
3254 if (tp == NULL)
3255 error (_("No thread."));
3256
3257 btrace_clear (tp);
3258 }
3259
3260 /* The "maintenance btrace" command. */
3261
3262 static void
3263 maint_btrace_cmd (char *args, int from_tty)
3264 {
3265 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3266 gdb_stdout);
3267 }
3268
3269 /* The "maintenance set btrace" command. */
3270
3271 static void
3272 maint_btrace_set_cmd (char *args, int from_tty)
3273 {
3274 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3275 gdb_stdout);
3276 }
3277
3278 /* The "maintenance show btrace" command. */
3279
3280 static void
3281 maint_btrace_show_cmd (char *args, int from_tty)
3282 {
3283 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3284 all_commands, gdb_stdout);
3285 }
3286
3287 /* The "maintenance set btrace pt" command. */
3288
3289 static void
3290 maint_btrace_pt_set_cmd (char *args, int from_tty)
3291 {
3292 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3293 all_commands, gdb_stdout);
3294 }
3295
3296 /* The "maintenance show btrace pt" command. */
3297
3298 static void
3299 maint_btrace_pt_show_cmd (char *args, int from_tty)
3300 {
3301 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3302 all_commands, gdb_stdout);
3303 }
3304
3305 /* The "maintenance info btrace" command. */
3306
3307 static void
3308 maint_info_btrace_cmd (char *args, int from_tty)
3309 {
3310 struct btrace_thread_info *btinfo;
3311 struct thread_info *tp;
3312 const struct btrace_config *conf;
3313
3314 if (args != NULL && *args != 0)
3315 error (_("Invalid argument."));
3316
3317 tp = find_thread_ptid (inferior_ptid);
3318 if (tp == NULL)
3319 error (_("No thread."));
3320
3321 btinfo = &tp->btrace;
3322
3323 conf = btrace_conf (btinfo);
3324 if (conf == NULL)
3325 error (_("No btrace configuration."));
3326
3327 printf_unfiltered (_("Format: %s.\n"),
3328 btrace_format_string (conf->format));
3329
3330 switch (conf->format)
3331 {
3332 default:
3333 break;
3334
3335 case BTRACE_FORMAT_BTS:
3336 printf_unfiltered (_("Number of packets: %u.\n"),
3337 VEC_length (btrace_block_s,
3338 btinfo->data.variant.bts.blocks));
3339 break;
3340
3341 #if defined (HAVE_LIBIPT)
3342 case BTRACE_FORMAT_PT:
3343 {
3344 struct pt_version version;
3345
3346 version = pt_library_version ();
3347 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3348 version.minor, version.build,
3349 version.ext != NULL ? version.ext : "");
3350
3351 btrace_maint_update_pt_packets (btinfo);
3352 printf_unfiltered (_("Number of packets: %u.\n"),
3353 VEC_length (btrace_pt_packet_s,
3354 btinfo->maint.variant.pt.packets));
3355 }
3356 break;
3357 #endif /* defined (HAVE_LIBIPT) */
3358 }
3359 }
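
/* Illustrative sample, not part of GDB: a session for the command
   above with a thread recorded in BTS format selected; the exact
   format name comes from btrace_format_string and the packet count
   is made up.

     (gdb) maint info btrace
     Format: Branch Trace Store.
     Number of packets: 1723.  */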
3360
3361 /* The "maint show btrace pt skip-pad" show value function. */
3362
3363 static void
3364 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3365 struct cmd_list_element *c,
3366 const char *value)
3367 {
3368 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3369 }
3370
3371
3372 /* Initialize btrace maintenance commands. */
3373
3374 void _initialize_btrace (void);
3375 void
3376 _initialize_btrace (void)
3377 {
3378 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3379 _("Info about branch tracing data."), &maintenanceinfolist);
3380
3381 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3382 _("Branch tracing maintenance commands."),
3383 &maint_btrace_cmdlist, "maintenance btrace ",
3384 0, &maintenancelist);
3385
3386 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3387 Set branch tracing specific variables."),
3388 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3389 0, &maintenance_set_cmdlist);
3390
3391 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3392 Set Intel Processor Trace specific variables."),
3393 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3394 0, &maint_btrace_set_cmdlist);
3395
3396 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3397 Show branch tracing specific variables."),
3398 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3399 0, &maintenance_show_cmdlist);
3400
3401 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3402 Show Intel Processor Trace specific variables."),
3403 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3404 0, &maint_btrace_show_cmdlist);
3405
3406 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3407 &maint_btrace_pt_skip_pad, _("\
3408 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3409 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3410 When enabled, PAD packets are ignored in the btrace packet history."),
3411 NULL, show_maint_btrace_pt_skip_pad,
3412 &maint_btrace_pt_set_cmdlist,
3413 &maint_btrace_pt_show_cmdlist);
3414
3415 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3416 _("Print the raw branch tracing data.\n\
3417 With no argument, print ten more packets after the previous ten-line print.\n\
3418 With '-' as argument print ten packets before a previous ten-line print.\n\
3419 One argument specifies the starting packet of a ten-line print.\n\
3420 Two arguments with comma between specify starting and ending packets to \
3421 print.\n\
3422 Preceded with '+'/'-' the second argument specifies the distance from the \
3423 first.\n"),
3424 &maint_btrace_cmdlist);
3425
3426 add_cmd ("clear-packet-history", class_maintenance,
3427 maint_btrace_clear_packet_history_cmd,
3428 _("Clears the branch tracing packet history.\n\
3429 Discards the raw branch tracing data but not the execution history data.\n\
3430 "),
3431 &maint_btrace_cmdlist);
3432
3433 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3434 _("Clears the branch tracing data.\n\
3435 Discards the raw branch tracing data and the execution history data.\n\
3436 The next 'record' command will fetch the branch tracing data anew.\n\
3437 "),
3438 &maint_btrace_cmdlist);
3439
3440 }