/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
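
/* A worked example of the decoding above (illustrative values): for a
   CPUID leaf 0x1 EAX of 0x000306d4, family = (0x306d4 >> 8) & 0xf = 0x6
   and model = ((0x306d4 >> 4) & 0xf) + ((0x306d4 >> 12) & 0xf0)
   = 0xd + 0x30 = 0x3d, with the extended model bits folded in because
   the family is 0x6.  */
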
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
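
/* An illustrative example of the wrap-around case above: with
   BUFFER_SIZE = 8, DATA_HEAD = 10, and SIZE = 4, we get DATA_TAIL = 6,
   START = BEGIN + 6, and STOP = BEGIN + 2.  The copy then takes the two
   bytes [6; 8) from the end of the buffer followed by the two bytes
   [0; 2) from its beginning.  */
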
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);
  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  fclose (file);

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
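
/* As an illustration of the heuristic above: on x86-64, kernel addresses
   live in the upper canonical half (0xffff800000000000 and above), so
   bit 63 is set for them, while user-space addresses have it clear.  */
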
/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
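
/* An illustrative example of the block formation described above: with the
   current pc at 0x4000f8 and the two newest samples, read backwards,

     s2 = { .from = 0x4000d0, .to = 0x4000e0 }
     s1 = { .from = 0x4000a0, .to = 0x4000c0 }

   we form the blocks [0x4000e0; 0x4000f8] and [0x4000c0; 0x4000d0], and
   finally push { .begin = 0, .end = 0x4000a0 } as the unfinished first
   block of inferior execution, whose begin address is not yet known.  */
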
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, so we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        warning (_("test bts: bad pid %ld, error: %s."),
                 (long) pid, safe_strerror (errno));
      if (!WIFSIGNALED (status))
        warning (_("test bts: expected killed. status: %d."),
                 status);

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        warning (_("test pt: bad pid %ld, error: %s."),
                 (long) pid, safe_strerror (errno));
      if (!WIFSIGNALED (status))
        warning (_("test pt: expected killed. status: %d."),
                 status);

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
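
  /* For example (illustrative): a request of 6 pages (0b110) is rounded
     up as follows: at PG = 1 bit 1 is set, so PAGES becomes 8; PAGES then
     matches 1 << 3 and the loop stops, yielding 8 pages.  */
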
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;
  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current pc.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */