1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
31 #ifdef HAVE_SYS_SYSCALL_H
32 #include <sys/syscall.h>
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
56 /* The perf_event sample header. */
57 struct perf_event_header header
;
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts
;
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
67 struct btrace_cpu cpu
;
68 unsigned int eax
, ebx
, ecx
, edx
;
71 memset (&cpu
, 0, sizeof (cpu
));
73 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
76 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
77 && edx
== signature_INTEL_edx
)
79 unsigned int cpuid
, ignore
;
81 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
84 cpu
.vendor
= CV_INTEL
;
86 cpu
.family
= (cpuid
>> 8) & 0xf;
87 cpu
.model
= (cpuid
>> 4) & 0xf;
89 if (cpu
.family
== 0x6)
90 cpu
.model
+= (cpuid
>> 12) & 0xf0;
98 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
101 perf_event_new_data (const struct perf_event_buffer
*pev
)
103 return *pev
->data_head
!= pev
->last_head
;
106 /* Try to determine the size of a pointer in bits for the OS.
108 This is the same as the size of a pointer for the inferior process
109 except when a 32-bit inferior is running on a 64-bit OS. */
111 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
112 to the memory holding the copy.
113 The caller is responsible for freeing the memory. */
116 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
119 const gdb_byte
*begin
, *end
, *start
, *stop
;
127 gdb_assert (size
<= data_head
);
128 data_tail
= data_head
- size
;
130 buffer_size
= pev
->size
;
132 start
= begin
+ data_tail
% buffer_size
;
133 stop
= begin
+ data_head
% buffer_size
;
135 buffer
= (gdb_byte
*) xmalloc (size
);
138 memcpy (buffer
, start
, stop
- start
);
141 end
= begin
+ buffer_size
;
143 memcpy (buffer
, start
, end
- start
);
144 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
150 /* Copy the perf event buffer data from PEV.
151 Store a pointer to the copy into DATA and its size in SIZE. */
154 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
160 data_head
= *pev
->data_head
;
163 if (data_head
< size
)
164 size
= (size_t) data_head
;
166 *data
= perf_event_read (pev
, data_head
, size
);
169 pev
->last_head
= data_head
;
/* Determine the event type for Intel Processor Trace.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The dynamic perf_event type for intel_pt is published via sysfs.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the start address of the Linux kernel by scanning
   /proc/kallsyms for the "_text" symbol.  The result is cached; returns
   zero if the address could not be determined.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
	break;

      /* Lines look like "<addr> <type> <symbol>"; we only care about
	 text symbols (type 't' or 'T').  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
	continue;

      if (strcmp (symbol, "_text") == 0)
	{
	  kernel_start = addr;
	  break;
	}
    }

  fclose (file);

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
254 /* Check whether a perf event record should be skipped. */
257 perf_event_skip_bts_record (const struct perf_event_bts
*bts
)
259 /* The hardware may report branches from kernel into user space. Branches
260 from user into kernel space will be suppressed. We filter the former to
261 provide a consistent branch trace excluding kernel. */
262 return perf_event_is_kernel_addr (bts
->from
);
265 /* Perform a few consistency checks on a perf event sample record. This is
266 meant to catch cases when we get out of sync with the perf event stream. */
269 perf_event_sample_ok (const struct perf_event_sample
*sample
)
271 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
274 if (sample
->header
.size
!= sizeof (*sample
))
280 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
281 and to addresses (plus a header).
283 Start points into that buffer at the next sample position.
284 We read the collected samples backwards from start.
286 While reading the samples, we convert the information into a list of blocks.
287 For two adjacent samples s1 and s2, we form a block b such that b.begin =
288 s1.to and b.end = s2.from.
290 In case the buffer overflows during sampling, one sample may have its lower
291 part at the end and its upper part at the beginning of the buffer. */
293 static VEC (btrace_block_s
) *
294 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
295 const uint8_t *end
, const uint8_t *start
, size_t size
)
297 VEC (btrace_block_s
) *btrace
= NULL
;
298 struct perf_event_sample sample
;
300 struct btrace_block block
= { 0, 0 };
301 struct regcache
*regcache
;
303 gdb_assert (begin
<= start
);
304 gdb_assert (start
<= end
);
306 /* The first block ends at the current pc. */
307 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
308 block
.end
= regcache_read_pc (regcache
);
310 /* The buffer may contain a partial record as its last entry (i.e. when the
311 buffer size is not a multiple of the sample size). */
312 read
= sizeof (sample
) - 1;
314 for (; read
< size
; read
+= sizeof (sample
))
316 const struct perf_event_sample
*psample
;
318 /* Find the next perf_event sample in a backwards traversal. */
319 start
-= sizeof (sample
);
321 /* If we're still inside the buffer, we're done. */
323 psample
= (const struct perf_event_sample
*) start
;
328 /* We're to the left of the ring buffer, we will wrap around and
329 reappear at the very right of the ring buffer. */
331 missing
= (begin
- start
);
332 start
= (end
- missing
);
334 /* If the entire sample is missing, we're done. */
335 if (missing
== sizeof (sample
))
336 psample
= (const struct perf_event_sample
*) start
;
341 /* The sample wrapped around. The lower part is at the end and
342 the upper part is at the beginning of the buffer. */
343 stack
= (uint8_t *) &sample
;
345 /* Copy the two parts so we have a contiguous sample. */
346 memcpy (stack
, start
, missing
);
347 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
353 if (!perf_event_sample_ok (psample
))
355 warning (_("Branch trace may be incomplete."));
359 if (perf_event_skip_bts_record (&psample
->bts
))
362 /* We found a valid sample, so we can complete the current block. */
363 block
.begin
= psample
->bts
.to
;
365 VEC_safe_push (btrace_block_s
, btrace
, &block
);
367 /* Start the next block. */
368 block
.end
= psample
->bts
.from
;
371 /* Push the last block (i.e. the first one of inferior execution), as well.
372 We don't know where it ends, but we know where it starts. If we're
373 reading delta trace, we can fill in the start address later on.
374 Otherwise we will prune it. */
376 VEC_safe_push (btrace_block_s
, btrace
, &block
);
381 /* Check whether the kernel supports BTS. */
384 kernel_supports_bts (void)
386 struct perf_event_attr attr
;
395 warning (_("test bts: cannot fork: %s."), safe_strerror (errno
));
399 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
402 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
403 safe_strerror (errno
));
407 status
= raise (SIGTRAP
);
410 warning (_("test bts: cannot raise SIGTRAP: %s."),
411 safe_strerror (errno
));
418 pid
= waitpid (child
, &status
, 0);
421 warning (_("test bts: bad pid %ld, error: %s."),
422 (long) pid
, safe_strerror (errno
));
426 if (!WIFSTOPPED (status
))
428 warning (_("test bts: expected stop. status: %d."),
433 memset (&attr
, 0, sizeof (attr
));
435 attr
.type
= PERF_TYPE_HARDWARE
;
436 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
437 attr
.sample_period
= 1;
438 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
439 attr
.exclude_kernel
= 1;
441 attr
.exclude_idle
= 1;
443 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
447 kill (child
, SIGKILL
);
448 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
450 pid
= waitpid (child
, &status
, 0);
453 warning (_("test bts: bad pid %ld, error: %s."),
454 (long) pid
, safe_strerror (errno
));
455 if (!WIFSIGNALED (status
))
456 warning (_("test bts: expected killed. status: %d."),
464 /* Check whether the kernel supports Intel Processor Trace. */
467 kernel_supports_pt (void)
469 struct perf_event_attr attr
;
471 int status
, file
, type
;
478 warning (_("test pt: cannot fork: %s."), safe_strerror (errno
));
482 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
485 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
486 safe_strerror (errno
));
490 status
= raise (SIGTRAP
);
493 warning (_("test pt: cannot raise SIGTRAP: %s."),
494 safe_strerror (errno
));
501 pid
= waitpid (child
, &status
, 0);
504 warning (_("test pt: bad pid %ld, error: %s."),
505 (long) pid
, safe_strerror (errno
));
509 if (!WIFSTOPPED (status
))
511 warning (_("test pt: expected stop. status: %d."),
516 status
= perf_event_pt_event_type (&type
);
521 memset (&attr
, 0, sizeof (attr
));
523 attr
.size
= sizeof (attr
);
525 attr
.exclude_kernel
= 1;
527 attr
.exclude_idle
= 1;
529 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
534 kill (child
, SIGKILL
);
535 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
537 pid
= waitpid (child
, &status
, 0);
540 warning (_("test pt: bad pid %ld, error: %s."),
541 (long) pid
, safe_strerror (errno
));
542 if (!WIFSIGNALED (status
))
543 warning (_("test pt: expected killed. status: %d."),
551 /* Check whether an Intel cpu supports BTS. */
554 intel_supports_bts (const struct btrace_cpu
*cpu
)
561 case 0x1a: /* Nehalem */
565 case 0x25: /* Westmere */
568 case 0x2a: /* Sandy Bridge */
570 case 0x3a: /* Ivy Bridge */
572 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
573 "from" information afer an EIST transition, T-states, C1E, or
574 Adaptive Thermal Throttling. */
582 /* Check whether the cpu supports BTS. */
585 cpu_supports_bts (void)
587 struct btrace_cpu cpu
;
589 cpu
= btrace_this_cpu ();
593 /* Don't know about others. Let's assume they do. */
597 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.
   The (kernel and cpu) probes are expensive, so the result is cached.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
	cached = -1;
      else if (!cpu_supports_bts ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel Processor Trace.
   The kernel probe is expensive, so the result is cached.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
639 /* See linux-btrace.h. */
642 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
646 case BTRACE_FORMAT_NONE
:
649 case BTRACE_FORMAT_BTS
:
650 return linux_supports_bts ();
652 case BTRACE_FORMAT_PT
:
653 return linux_supports_pt ();
656 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
659 /* Enable branch tracing in BTS format. */
661 static struct btrace_target_info
*
662 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
664 struct perf_event_mmap_page
*header
;
665 struct btrace_target_info
*tinfo
;
666 struct btrace_tinfo_bts
*bts
;
671 tinfo
= XCNEW (struct btrace_target_info
);
674 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
675 bts
= &tinfo
->variant
.bts
;
677 bts
->attr
.size
= sizeof (bts
->attr
);
678 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
679 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
680 bts
->attr
.sample_period
= 1;
682 /* We sample from and to address. */
683 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
685 bts
->attr
.exclude_kernel
= 1;
686 bts
->attr
.exclude_hv
= 1;
687 bts
->attr
.exclude_idle
= 1;
689 pid
= ptid_get_lwp (ptid
);
691 pid
= ptid_get_pid (ptid
);
694 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
698 /* Convert the requested size in bytes to pages (rounding up). */
699 pages
= ((size_t) conf
->size
/ PAGE_SIZE
700 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
701 /* We need at least one page. */
705 /* The buffer size can be requested in powers of two pages. Adjust PAGES
706 to the next power of two. */
707 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
708 if ((pages
& ((size_t) 1 << pg
)) != 0)
709 pages
+= ((size_t) 1 << pg
);
711 /* We try to allocate the requested size.
712 If that fails, try to get as much as we can. */
713 for (; pages
> 0; pages
>>= 1)
718 data_size
= (__u64
) pages
* PAGE_SIZE
;
720 /* Don't ask for more than we can represent in the configuration. */
721 if ((__u64
) UINT_MAX
< data_size
)
724 size
= (size_t) data_size
;
725 length
= size
+ PAGE_SIZE
;
727 /* Check for overflows. */
728 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
731 /* The number of pages we request needs to be a power of two. */
732 header
= ((struct perf_event_mmap_page
*)
733 mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0));
734 if (header
!= MAP_FAILED
)
741 data_offset
= PAGE_SIZE
;
743 #if defined (PERF_ATTR_SIZE_VER5)
744 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
748 data_offset
= header
->data_offset
;
749 data_size
= header
->data_size
;
751 size
= (unsigned int) data_size
;
753 /* Check for overflows. */
754 if ((__u64
) size
!= data_size
)
756 munmap ((void *) header
, size
+ PAGE_SIZE
);
760 #endif /* defined (PERF_ATTR_SIZE_VER5) */
762 bts
->header
= header
;
763 bts
->bts
.mem
= ((const uint8_t *) header
) + data_offset
;
764 bts
->bts
.size
= size
;
765 bts
->bts
.data_head
= &header
->data_head
;
766 bts
->bts
.last_head
= 0ull;
768 tinfo
->conf
.bts
.size
= (unsigned int) size
;
772 /* We were not able to allocate any buffer. */
780 #if defined (PERF_ATTR_SIZE_VER5)
782 /* Enable branch tracing in Intel Processor Trace format. */
784 static struct btrace_target_info
*
785 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
787 struct perf_event_mmap_page
*header
;
788 struct btrace_target_info
*tinfo
;
789 struct btrace_tinfo_pt
*pt
;
791 int pid
, pg
, errcode
, type
;
796 errcode
= perf_event_pt_event_type (&type
);
800 pid
= ptid_get_lwp (ptid
);
802 pid
= ptid_get_pid (ptid
);
804 tinfo
= XCNEW (struct btrace_target_info
);
807 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
808 pt
= &tinfo
->variant
.pt
;
810 pt
->attr
.size
= sizeof (pt
->attr
);
811 pt
->attr
.type
= type
;
813 pt
->attr
.exclude_kernel
= 1;
814 pt
->attr
.exclude_hv
= 1;
815 pt
->attr
.exclude_idle
= 1;
818 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
822 /* Allocate the configuration page. */
823 header
= ((struct perf_event_mmap_page
*)
824 mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
826 if (header
== MAP_FAILED
)
829 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
831 /* Convert the requested size in bytes to pages (rounding up). */
832 pages
= ((size_t) conf
->size
/ PAGE_SIZE
833 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
834 /* We need at least one page. */
838 /* The buffer size can be requested in powers of two pages. Adjust PAGES
839 to the next power of two. */
840 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
841 if ((pages
& ((size_t) 1 << pg
)) != 0)
842 pages
+= ((size_t) 1 << pg
);
844 /* We try to allocate the requested size.
845 If that fails, try to get as much as we can. */
846 for (; pages
> 0; pages
>>= 1)
851 data_size
= (__u64
) pages
* PAGE_SIZE
;
853 /* Don't ask for more than we can represent in the configuration. */
854 if ((__u64
) UINT_MAX
< data_size
)
857 size
= (size_t) data_size
;
859 /* Check for overflows. */
860 if ((__u64
) size
!= data_size
)
863 header
->aux_size
= data_size
;
866 pt
->pt
.mem
= ((const uint8_t *)
867 mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
868 header
->aux_offset
));
869 if (pt
->pt
.mem
!= MAP_FAILED
)
878 pt
->pt
.data_head
= &header
->aux_head
;
880 tinfo
->conf
.pt
.size
= (unsigned int) size
;
884 munmap((void *) header
, PAGE_SIZE
);
894 #else /* !defined (PERF_ATTR_SIZE_VER5) */
896 static struct btrace_target_info
*
897 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
903 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
905 /* See linux-btrace.h. */
907 struct btrace_target_info
*
908 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
910 struct btrace_target_info
*tinfo
;
913 switch (conf
->format
)
915 case BTRACE_FORMAT_NONE
:
918 case BTRACE_FORMAT_BTS
:
919 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
922 case BTRACE_FORMAT_PT
:
923 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
930 /* Disable BTS tracing. */
932 static enum btrace_error
933 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
935 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
938 return BTRACE_ERR_NONE
;
941 /* Disable Intel Processor Trace tracing. */
943 static enum btrace_error
944 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
946 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
947 munmap((void *) tinfo
->header
, PAGE_SIZE
);
950 return BTRACE_ERR_NONE
;
953 /* See linux-btrace.h. */
956 linux_disable_btrace (struct btrace_target_info
*tinfo
)
958 enum btrace_error errcode
;
960 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
961 switch (tinfo
->conf
.format
)
963 case BTRACE_FORMAT_NONE
:
966 case BTRACE_FORMAT_BTS
:
967 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
970 case BTRACE_FORMAT_PT
:
971 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
975 if (errcode
== BTRACE_ERR_NONE
)
981 /* Read branch trace data in BTS format for the thread given by TINFO into
982 BTRACE using the TYPE reading method. */
984 static enum btrace_error
985 linux_read_bts (struct btrace_data_bts
*btrace
,
986 struct btrace_target_info
*tinfo
,
987 enum btrace_read_type type
)
989 struct perf_event_buffer
*pevent
;
990 const uint8_t *begin
, *end
, *start
;
991 size_t buffer_size
, size
;
992 __u64 data_head
, data_tail
;
993 unsigned int retries
= 5;
995 pevent
= &tinfo
->variant
.bts
.bts
;
997 /* For delta reads, we return at least the partial last block containing
999 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
1000 return BTRACE_ERR_NONE
;
1002 buffer_size
= pevent
->size
;
1003 data_tail
= pevent
->last_head
;
1005 /* We may need to retry reading the trace. See below. */
1008 data_head
= *pevent
->data_head
;
1010 /* Delete any leftover trace from the previous iteration. */
1011 VEC_free (btrace_block_s
, btrace
->blocks
);
1013 if (type
== BTRACE_READ_DELTA
)
1017 /* Determine the number of bytes to read and check for buffer
1020 /* Check for data head overflows. We might be able to recover from
1021 those but they are very unlikely and it's not really worth the
1023 if (data_head
< data_tail
)
1024 return BTRACE_ERR_OVERFLOW
;
1026 /* If the buffer is smaller than the trace delta, we overflowed. */
1027 data_size
= data_head
- data_tail
;
1028 if (buffer_size
< data_size
)
1029 return BTRACE_ERR_OVERFLOW
;
1031 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1032 size
= (size_t) data_size
;
1036 /* Read the entire buffer. */
1039 /* Adjust the size if the buffer has not overflowed, yet. */
1040 if (data_head
< size
)
1041 size
= (size_t) data_head
;
1044 /* Data_head keeps growing; the buffer itself is circular. */
1045 begin
= pevent
->mem
;
1046 start
= begin
+ data_head
% buffer_size
;
1048 if (data_head
<= buffer_size
)
1051 end
= begin
+ pevent
->size
;
1053 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
1055 /* The stopping thread notifies its ptracer before it is scheduled out.
1056 On multi-core systems, the debugger might therefore run while the
1057 kernel might be writing the last branch trace records.
1059 Let's check whether the data head moved while we read the trace. */
1060 if (data_head
== *pevent
->data_head
)
1064 pevent
->last_head
= data_head
;
1066 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1067 if we're not doing a delta read. There is no way of filling in its zeroed
1069 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1070 && type
!= BTRACE_READ_DELTA
)
1071 VEC_pop (btrace_block_s
, btrace
->blocks
);
1073 return BTRACE_ERR_NONE
;
1076 /* Fill in the Intel Processor Trace configuration information. */
1079 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1081 conf
->cpu
= btrace_this_cpu ();
1084 /* Read branch trace data in Intel Processor Trace format for the thread
1085 given by TINFO into BTRACE using the TYPE reading method. */
1087 static enum btrace_error
1088 linux_read_pt (struct btrace_data_pt
*btrace
,
1089 struct btrace_target_info
*tinfo
,
1090 enum btrace_read_type type
)
1092 struct perf_event_buffer
*pt
;
1094 pt
= &tinfo
->variant
.pt
.pt
;
1096 linux_fill_btrace_pt_config (&btrace
->config
);
1100 case BTRACE_READ_DELTA
:
1101 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1102 around to stay inside the aux buffer. */
1103 return BTRACE_ERR_NOT_SUPPORTED
;
1105 case BTRACE_READ_NEW
:
1106 if (!perf_event_new_data (pt
))
1107 return BTRACE_ERR_NONE
;
1110 case BTRACE_READ_ALL
:
1111 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1112 return BTRACE_ERR_NONE
;
1115 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1118 /* See linux-btrace.h. */
1121 linux_read_btrace (struct btrace_data
*btrace
,
1122 struct btrace_target_info
*tinfo
,
1123 enum btrace_read_type type
)
1125 switch (tinfo
->conf
.format
)
1127 case BTRACE_FORMAT_NONE
:
1128 return BTRACE_ERR_NOT_SUPPORTED
;
1130 case BTRACE_FORMAT_BTS
:
1131 /* We read btrace in BTS format. */
1132 btrace
->format
= BTRACE_FORMAT_BTS
;
1133 btrace
->variant
.bts
.blocks
= NULL
;
1135 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1137 case BTRACE_FORMAT_PT
:
1138 /* We read btrace in Intel Processor Trace format. */
1139 btrace
->format
= BTRACE_FORMAT_PT
;
1140 btrace
->variant
.pt
.data
= NULL
;
1141 btrace
->variant
.pt
.size
= 0;
1143 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1146 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1149 /* See linux-btrace.h. */
1151 const struct btrace_config
*
1152 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1154 return &tinfo
->conf
;
1157 #else /* !HAVE_LINUX_PERF_EVENT_H */
1159 /* See linux-btrace.h. */
1162 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1167 /* See linux-btrace.h. */
1169 struct btrace_target_info
*
1170 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1175 /* See linux-btrace.h. */
1178 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1180 return BTRACE_ERR_NOT_SUPPORTED
;
1183 /* See linux-btrace.h. */
1186 linux_read_btrace (struct btrace_data
*btrace
,
1187 struct btrace_target_info
*tinfo
,
1188 enum btrace_read_type type
)
1190 return BTRACE_ERR_NOT_SUPPORTED
;
1193 /* See linux-btrace.h. */
1195 const struct btrace_config
*
1196 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1201 #endif /* !HAVE_LINUX_PERF_EVENT_H */