/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "x86-cpuid.h"
#include "filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <signal.h>

#include "nat/gdb_ptrace.h"
#include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;
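
              /* For example, a CPUID leaf 1 EAX value of 0x000306a9 decodes
                 to family 0x6 and model 0x3a with the extraction below.  */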
              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
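  /* For example, with BUFFER_SIZE == 16, DATA_HEAD == 20 and SIZE == 8,
     DATA_TAIL becomes 12, so we copy bytes [12; 16) followed by
     bytes [0; 4).  */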
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  gdb_file_up file
    = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == nullptr)
    return -1;

  int found = fscanf (file.get (), "%d", type);
  if (found == 1)
    return 0;

  return -1;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == nullptr)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;
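
      /* A /proc/kallsyms line has the form "<address> <type> <symbol>",
         e.g. "ffffffff81000000 T _text" on a typical x86_64 kernel.  */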
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
/* Check whether a perf event record should be skipped.  */

static int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
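
/* For example, if one sample records a branch to 0x400100 and the next sample
   records a branch from 0x400140, the instructions in [0x400100; 0x400140]
   were executed sequentially and form one block.  */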
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
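  /* For example, assuming a 24-byte perf_event_sample and SIZE == 1000, we
     read 41 complete samples and skip the remaining 16 bytes, which cannot
     hold a complete sample.  */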
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}
/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x25: /* Westmere */
        case 0x2a: /* Sandy Bridge */
        case 0x3a: /* Ivy Bridge */
          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}
/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}
/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  if (!kernel_supports_bts ())
    return 0;
  else if (!cpu_supports_bts ())
    return 0;

  return 1;
}
/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  if (!kernel_supports_pt ())
    return 0;

  return 1;
}
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
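  /* For example, a request for 5 pages is rounded up to 8 pages.  */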
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        return nullptr;
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.get () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  data.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}
#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg, errcode, type;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = header;
  pt->file = fd.release ();

  data.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}
#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  if (tinfo == NULL)
    error (_("Unknown error."));

  return tinfo;
}
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}
/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */