1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
31 #ifdef HAVE_SYS_SYSCALL_H
32 #include <sys/syscall.h>
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
56 /* The perf_event sample header. */
57 struct perf_event_header header
;
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts
;
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
67 struct btrace_cpu cpu
;
68 unsigned int eax
, ebx
, ecx
, edx
;
71 memset (&cpu
, 0, sizeof (cpu
));
73 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
76 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
77 && edx
== signature_INTEL_edx
)
79 unsigned int cpuid
, ignore
;
81 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
84 cpu
.vendor
= CV_INTEL
;
86 cpu
.family
= (cpuid
>> 8) & 0xf;
87 cpu
.model
= (cpuid
>> 4) & 0xf;
89 if (cpu
.family
== 0x6)
90 cpu
.model
+= (cpuid
>> 12) & 0xf0;
98 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
101 perf_event_new_data (const struct perf_event_buffer
*pev
)
103 return *pev
->data_head
!= pev
->last_head
;
106 /* Try to determine the size of a pointer in bits for the OS.
108 This is the same as the size of a pointer for the inferior process
109 except when a 32-bit inferior is running on a 64-bit OS. */
111 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
112 to the memory holding the copy.
113 The caller is responsible for freeing the memory. */
116 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
119 const gdb_byte
*begin
, *end
, *start
, *stop
;
127 gdb_assert (size
<= data_head
);
128 data_tail
= data_head
- size
;
130 buffer_size
= pev
->size
;
132 start
= begin
+ data_tail
% buffer_size
;
133 stop
= begin
+ data_head
% buffer_size
;
135 buffer
= xmalloc (size
);
138 memcpy (buffer
, start
, stop
- start
);
141 end
= begin
+ buffer_size
;
143 memcpy (buffer
, start
, end
- start
);
144 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
150 /* Copy the perf event buffer data from PEV.
151 Store a pointer to the copy into DATA and its size in SIZE. */
154 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
160 data_head
= *pev
->data_head
;
163 if (data_head
< size
)
164 size
= (size_t) data_head
;
166 *data
= perf_event_read (pev
, data_head
, size
);
169 pev
->last_head
= data_head
;
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The kernel exports the dynamically assigned intel_pt PMU type
     via sysfs.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  /* The result cannot change while we run; compute it only once.  */
  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
        break;

      /* Lines look like "<hex addr> <type> <symbol>"; we want the
         text symbol "_text", which marks the kernel's start.  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  fclose (file);

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
254 /* Check whether a perf event record should be skipped. */
257 perf_event_skip_bts_record (const struct perf_event_bts
*bts
)
259 /* The hardware may report branches from kernel into user space. Branches
260 from user into kernel space will be suppressed. We filter the former to
261 provide a consistent branch trace excluding kernel. */
262 return perf_event_is_kernel_addr (bts
->from
);
265 /* Perform a few consistency checks on a perf event sample record. This is
266 meant to catch cases when we get out of sync with the perf event stream. */
269 perf_event_sample_ok (const struct perf_event_sample
*sample
)
271 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
274 if (sample
->header
.size
!= sizeof (*sample
))
280 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
281 and to addresses (plus a header).
283 Start points into that buffer at the next sample position.
284 We read the collected samples backwards from start.
286 While reading the samples, we convert the information into a list of blocks.
287 For two adjacent samples s1 and s2, we form a block b such that b.begin =
288 s1.to and b.end = s2.from.
290 In case the buffer overflows during sampling, one sample may have its lower
291 part at the end and its upper part at the beginning of the buffer. */
293 static VEC (btrace_block_s
) *
294 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
295 const uint8_t *end
, const uint8_t *start
, size_t size
)
297 VEC (btrace_block_s
) *btrace
= NULL
;
298 struct perf_event_sample sample
;
300 struct btrace_block block
= { 0, 0 };
301 struct regcache
*regcache
;
303 gdb_assert (begin
<= start
);
304 gdb_assert (start
<= end
);
306 /* The first block ends at the current pc. */
307 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
308 block
.end
= regcache_read_pc (regcache
);
310 /* The buffer may contain a partial record as its last entry (i.e. when the
311 buffer size is not a multiple of the sample size). */
312 read
= sizeof (sample
) - 1;
314 for (; read
< size
; read
+= sizeof (sample
))
316 const struct perf_event_sample
*psample
;
318 /* Find the next perf_event sample in a backwards traversal. */
319 start
-= sizeof (sample
);
321 /* If we're still inside the buffer, we're done. */
323 psample
= (const struct perf_event_sample
*) start
;
328 /* We're to the left of the ring buffer, we will wrap around and
329 reappear at the very right of the ring buffer. */
331 missing
= (begin
- start
);
332 start
= (end
- missing
);
334 /* If the entire sample is missing, we're done. */
335 if (missing
== sizeof (sample
))
336 psample
= (const struct perf_event_sample
*) start
;
341 /* The sample wrapped around. The lower part is at the end and
342 the upper part is at the beginning of the buffer. */
343 stack
= (uint8_t *) &sample
;
345 /* Copy the two parts so we have a contiguous sample. */
346 memcpy (stack
, start
, missing
);
347 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
353 if (!perf_event_sample_ok (psample
))
355 warning (_("Branch trace may be incomplete."));
359 if (perf_event_skip_bts_record (&psample
->bts
))
362 /* We found a valid sample, so we can complete the current block. */
363 block
.begin
= psample
->bts
.to
;
365 VEC_safe_push (btrace_block_s
, btrace
, &block
);
367 /* Start the next block. */
368 block
.end
= psample
->bts
.from
;
371 /* Push the last block (i.e. the first one of inferior execution), as well.
372 We don't know where it ends, but we know where it starts. If we're
373 reading delta trace, we can fill in the start address later on.
374 Otherwise we will prune it. */
376 VEC_safe_push (btrace_block_s
, btrace
, &block
);
381 /* Check whether the kernel supports BTS. */
384 kernel_supports_bts (void)
386 struct perf_event_attr attr
;
395 warning (_("test bts: cannot fork: %s."), safe_strerror (errno
));
399 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
402 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
403 safe_strerror (errno
));
407 status
= raise (SIGTRAP
);
410 warning (_("test bts: cannot raise SIGTRAP: %s."),
411 safe_strerror (errno
));
418 pid
= waitpid (child
, &status
, 0);
421 warning (_("test bts: bad pid %ld, error: %s."),
422 (long) pid
, safe_strerror (errno
));
426 if (!WIFSTOPPED (status
))
428 warning (_("test bts: expected stop. status: %d."),
433 memset (&attr
, 0, sizeof (attr
));
435 attr
.type
= PERF_TYPE_HARDWARE
;
436 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
437 attr
.sample_period
= 1;
438 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
439 attr
.exclude_kernel
= 1;
441 attr
.exclude_idle
= 1;
443 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
447 kill (child
, SIGKILL
);
448 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
450 pid
= waitpid (child
, &status
, 0);
453 warning (_("test bts: bad pid %ld, error: %s."),
454 (long) pid
, safe_strerror (errno
));
455 if (!WIFSIGNALED (status
))
456 warning (_("test bts: expected killed. status: %d."),
464 /* Check whether the kernel supports Intel(R) Processor Trace. */
467 kernel_supports_pt (void)
469 struct perf_event_attr attr
;
471 int status
, file
, type
;
478 warning (_("test pt: cannot fork: %s."), safe_strerror (errno
));
482 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
485 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
486 safe_strerror (errno
));
490 status
= raise (SIGTRAP
);
493 warning (_("test pt: cannot raise SIGTRAP: %s."),
494 safe_strerror (errno
));
501 pid
= waitpid (child
, &status
, 0);
504 warning (_("test pt: bad pid %ld, error: %s."),
505 (long) pid
, safe_strerror (errno
));
509 if (!WIFSTOPPED (status
))
511 warning (_("test pt: expected stop. status: %d."),
516 status
= perf_event_pt_event_type (&type
);
521 memset (&attr
, 0, sizeof (attr
));
523 attr
.size
= sizeof (attr
);
525 attr
.exclude_kernel
= 1;
527 attr
.exclude_idle
= 1;
529 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
534 kill (child
, SIGKILL
);
535 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
537 pid
= waitpid (child
, &status
, 0);
540 warning (_("test pt: bad pid %ld, error: %s."),
541 (long) pid
, safe_strerror (errno
));
542 if (!WIFSIGNALED (status
))
543 warning (_("test pt: expected killed. status: %d."),
551 /* Check whether an Intel cpu supports BTS. */
554 intel_supports_bts (const struct btrace_cpu
*cpu
)
561 case 0x1a: /* Nehalem */
565 case 0x25: /* Westmere */
568 case 0x2a: /* Sandy Bridge */
570 case 0x3a: /* Ivy Bridge */
572 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
573 "from" information afer an EIST transition, T-states, C1E, or
574 Adaptive Thermal Throttling. */
582 /* Check whether the cpu supports BTS. */
585 cpu_supports_bts (void)
587 struct btrace_cpu cpu
;
589 cpu
= btrace_this_cpu ();
593 /* Don't know about others. Let's assume they do. */
597 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  /* Probing involves forking a child; cache the answer.  */
  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel(R) Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  /* Probing involves forking a child; cache the answer.  */
  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
639 /* See linux-btrace.h. */
642 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
646 case BTRACE_FORMAT_NONE
:
649 case BTRACE_FORMAT_BTS
:
650 return linux_supports_bts ();
652 case BTRACE_FORMAT_PT
:
653 return linux_supports_pt ();
656 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
659 /* Enable branch tracing in BTS format. */
661 static struct btrace_target_info
*
662 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
664 struct perf_event_mmap_page
*header
;
665 struct btrace_target_info
*tinfo
;
666 struct btrace_tinfo_bts
*bts
;
671 tinfo
= XCNEW (struct btrace_target_info
);
674 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
675 bts
= &tinfo
->variant
.bts
;
677 bts
->attr
.size
= sizeof (bts
->attr
);
678 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
679 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
680 bts
->attr
.sample_period
= 1;
682 /* We sample from and to address. */
683 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
685 bts
->attr
.exclude_kernel
= 1;
686 bts
->attr
.exclude_hv
= 1;
687 bts
->attr
.exclude_idle
= 1;
689 pid
= ptid_get_lwp (ptid
);
691 pid
= ptid_get_pid (ptid
);
694 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
698 /* Convert the requested size in bytes to pages (rounding up). */
699 pages
= ((size_t) conf
->size
/ PAGE_SIZE
700 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
701 /* We need at least one page. */
705 /* The buffer size can be requested in powers of two pages. Adjust PAGES
706 to the next power of two. */
707 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
708 if ((pages
& ((size_t) 1 << pg
)) != 0)
709 pages
+= ((size_t) 1 << pg
);
711 /* We try to allocate the requested size.
712 If that fails, try to get as much as we can. */
713 for (; pages
> 0; pages
>>= 1)
718 data_size
= (__u64
) pages
* PAGE_SIZE
;
720 /* Don't ask for more than we can represent in the configuration. */
721 if ((__u64
) UINT_MAX
< data_size
)
724 size
= (size_t) data_size
;
725 length
= size
+ PAGE_SIZE
;
727 /* Check for overflows. */
728 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
731 /* The number of pages we request needs to be a power of two. */
732 header
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0);
733 if (header
!= MAP_FAILED
)
740 data_offset
= PAGE_SIZE
;
742 #if defined (PERF_ATTR_SIZE_VER5)
743 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
747 data_offset
= header
->data_offset
;
748 data_size
= header
->data_size
;
750 size
= (unsigned int) data_size
;
752 /* Check for overflows. */
753 if ((__u64
) size
!= data_size
)
755 munmap ((void *) header
, size
+ PAGE_SIZE
);
759 #endif /* defined (PERF_ATTR_SIZE_VER5) */
761 bts
->header
= header
;
762 bts
->bts
.mem
= ((const uint8_t *) header
) + data_offset
;
763 bts
->bts
.size
= size
;
764 bts
->bts
.data_head
= &header
->data_head
;
765 bts
->bts
.last_head
= 0ull;
767 tinfo
->conf
.bts
.size
= (unsigned int) size
;
771 /* We were not able to allocate any buffer. */
779 #if defined (PERF_ATTR_SIZE_VER5)
781 /* Enable branch tracing in Intel(R) Processor Trace format. */
783 static struct btrace_target_info
*
784 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
786 struct perf_event_mmap_page
*header
;
787 struct btrace_target_info
*tinfo
;
788 struct btrace_tinfo_pt
*pt
;
790 int pid
, pg
, errcode
, type
;
795 errcode
= perf_event_pt_event_type (&type
);
799 pid
= ptid_get_lwp (ptid
);
801 pid
= ptid_get_pid (ptid
);
803 tinfo
= XCNEW (struct btrace_target_info
);
806 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
807 pt
= &tinfo
->variant
.pt
;
809 pt
->attr
.size
= sizeof (pt
->attr
);
810 pt
->attr
.type
= type
;
812 pt
->attr
.exclude_kernel
= 1;
813 pt
->attr
.exclude_hv
= 1;
814 pt
->attr
.exclude_idle
= 1;
817 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
821 /* Allocate the configuration page. */
822 header
= mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
824 if (header
== MAP_FAILED
)
827 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
829 /* Convert the requested size in bytes to pages (rounding up). */
830 pages
= ((size_t) conf
->size
/ PAGE_SIZE
831 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
832 /* We need at least one page. */
836 /* The buffer size can be requested in powers of two pages. Adjust PAGES
837 to the next power of two. */
838 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
839 if ((pages
& ((size_t) 1 << pg
)) != 0)
840 pages
+= ((size_t) 1 << pg
);
842 /* We try to allocate the requested size.
843 If that fails, try to get as much as we can. */
844 for (; pages
> 0; pages
>>= 1)
849 data_size
= (__u64
) pages
* PAGE_SIZE
;
851 /* Don't ask for more than we can represent in the configuration. */
852 if ((__u64
) UINT_MAX
< data_size
)
855 size
= (size_t) data_size
;
857 /* Check for overflows. */
858 if ((__u64
) size
!= data_size
)
861 header
->aux_size
= data_size
;
864 pt
->pt
.mem
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
866 if (pt
->pt
.mem
!= MAP_FAILED
)
875 pt
->pt
.data_head
= &header
->aux_head
;
877 tinfo
->conf
.pt
.size
= (unsigned int) size
;
881 munmap((void *) header
, PAGE_SIZE
);
891 #else /* !defined (PERF_ATTR_SIZE_VER5) */
893 static struct btrace_target_info
*
894 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
900 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
902 /* See linux-btrace.h. */
904 struct btrace_target_info
*
905 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
907 struct btrace_target_info
*tinfo
;
910 switch (conf
->format
)
912 case BTRACE_FORMAT_NONE
:
915 case BTRACE_FORMAT_BTS
:
916 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
919 case BTRACE_FORMAT_PT
:
920 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
927 /* Disable BTS tracing. */
929 static enum btrace_error
930 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
932 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
935 return BTRACE_ERR_NONE
;
938 /* Disable Intel(R) Processor Trace tracing. */
940 static enum btrace_error
941 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
943 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
944 munmap((void *) tinfo
->header
, PAGE_SIZE
);
947 return BTRACE_ERR_NONE
;
950 /* See linux-btrace.h. */
953 linux_disable_btrace (struct btrace_target_info
*tinfo
)
955 enum btrace_error errcode
;
957 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
958 switch (tinfo
->conf
.format
)
960 case BTRACE_FORMAT_NONE
:
963 case BTRACE_FORMAT_BTS
:
964 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
967 case BTRACE_FORMAT_PT
:
968 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
972 if (errcode
== BTRACE_ERR_NONE
)
978 /* Read branch trace data in BTS format for the thread given by TINFO into
979 BTRACE using the TYPE reading method. */
981 static enum btrace_error
982 linux_read_bts (struct btrace_data_bts
*btrace
,
983 struct btrace_target_info
*tinfo
,
984 enum btrace_read_type type
)
986 struct perf_event_buffer
*pevent
;
987 const uint8_t *begin
, *end
, *start
;
988 size_t buffer_size
, size
;
989 __u64 data_head
, data_tail
;
990 unsigned int retries
= 5;
992 pevent
= &tinfo
->variant
.bts
.bts
;
994 /* For delta reads, we return at least the partial last block containing
996 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
997 return BTRACE_ERR_NONE
;
999 buffer_size
= pevent
->size
;
1000 data_tail
= pevent
->last_head
;
1002 /* We may need to retry reading the trace. See below. */
1005 data_head
= *pevent
->data_head
;
1007 /* Delete any leftover trace from the previous iteration. */
1008 VEC_free (btrace_block_s
, btrace
->blocks
);
1010 if (type
== BTRACE_READ_DELTA
)
1014 /* Determine the number of bytes to read and check for buffer
1017 /* Check for data head overflows. We might be able to recover from
1018 those but they are very unlikely and it's not really worth the
1020 if (data_head
< data_tail
)
1021 return BTRACE_ERR_OVERFLOW
;
1023 /* If the buffer is smaller than the trace delta, we overflowed. */
1024 data_size
= data_head
- data_tail
;
1025 if (buffer_size
< data_size
)
1026 return BTRACE_ERR_OVERFLOW
;
1028 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1029 size
= (size_t) data_size
;
1033 /* Read the entire buffer. */
1036 /* Adjust the size if the buffer has not overflowed, yet. */
1037 if (data_head
< size
)
1038 size
= (size_t) data_head
;
1041 /* Data_head keeps growing; the buffer itself is circular. */
1042 begin
= pevent
->mem
;
1043 start
= begin
+ data_head
% buffer_size
;
1045 if (data_head
<= buffer_size
)
1048 end
= begin
+ pevent
->size
;
1050 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
1052 /* The stopping thread notifies its ptracer before it is scheduled out.
1053 On multi-core systems, the debugger might therefore run while the
1054 kernel might be writing the last branch trace records.
1056 Let's check whether the data head moved while we read the trace. */
1057 if (data_head
== *pevent
->data_head
)
1061 pevent
->last_head
= data_head
;
1063 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1064 if we're not doing a delta read. There is no way of filling in its zeroed
1066 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1067 && type
!= BTRACE_READ_DELTA
)
1068 VEC_pop (btrace_block_s
, btrace
->blocks
);
1070 return BTRACE_ERR_NONE
;
1073 /* Fill in the Intel(R) Processor Trace configuration information. */
1076 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1078 conf
->cpu
= btrace_this_cpu ();
1081 /* Read branch trace data in Intel(R) Processor Trace format for the thread
1082 given by TINFO into BTRACE using the TYPE reading method. */
1084 static enum btrace_error
1085 linux_read_pt (struct btrace_data_pt
*btrace
,
1086 struct btrace_target_info
*tinfo
,
1087 enum btrace_read_type type
)
1089 struct perf_event_buffer
*pt
;
1091 pt
= &tinfo
->variant
.pt
.pt
;
1093 linux_fill_btrace_pt_config (&btrace
->config
);
1097 case BTRACE_READ_DELTA
:
1098 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1099 around to stay inside the aux buffer. */
1100 return BTRACE_ERR_NOT_SUPPORTED
;
1102 case BTRACE_READ_NEW
:
1103 if (!perf_event_new_data (pt
))
1104 return BTRACE_ERR_NONE
;
1107 case BTRACE_READ_ALL
:
1108 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1109 return BTRACE_ERR_NONE
;
1112 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1115 /* See linux-btrace.h. */
1118 linux_read_btrace (struct btrace_data
*btrace
,
1119 struct btrace_target_info
*tinfo
,
1120 enum btrace_read_type type
)
1122 switch (tinfo
->conf
.format
)
1124 case BTRACE_FORMAT_NONE
:
1125 return BTRACE_ERR_NOT_SUPPORTED
;
1127 case BTRACE_FORMAT_BTS
:
1128 /* We read btrace in BTS format. */
1129 btrace
->format
= BTRACE_FORMAT_BTS
;
1130 btrace
->variant
.bts
.blocks
= NULL
;
1132 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1134 case BTRACE_FORMAT_PT
:
1135 /* We read btrace in Intel(R) Processor Trace format. */
1136 btrace
->format
= BTRACE_FORMAT_PT
;
1137 btrace
->variant
.pt
.data
= NULL
;
1138 btrace
->variant
.pt
.size
= 0;
1140 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1143 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1146 /* See linux-btrace.h. */
1148 const struct btrace_config
*
1149 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1151 return &tinfo
->conf
;
1154 #else /* !HAVE_LINUX_PERF_EVENT_H */
1156 /* See linux-btrace.h. */
1159 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1164 /* See linux-btrace.h. */
1166 struct btrace_target_info
*
1167 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1172 /* See linux-btrace.h. */
1175 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1177 return BTRACE_ERR_NOT_SUPPORTED
;
1180 /* See linux-btrace.h. */
1183 linux_read_btrace (struct btrace_data
*btrace
,
1184 struct btrace_target_info
*tinfo
,
1185 enum btrace_read_type type
)
1187 return BTRACE_ERR_NOT_SUPPORTED
;
1190 /* See linux-btrace.h. */
1192 const struct btrace_config
*
1193 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1198 #endif /* !HAVE_LINUX_PERF_EVENT_H */