1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
31 #ifdef HAVE_SYS_SYSCALL_H
32 #include <sys/syscall.h>
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
43 /* A branch trace record in perf_event. */
46 /* The linear address of the branch source. */
49 /* The linear address of the branch destination. */
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
56 /* The perf_event sample header. */
57 struct perf_event_header header
;
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts
;
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
67 struct btrace_cpu cpu
;
68 unsigned int eax
, ebx
, ecx
, edx
;
71 memset (&cpu
, 0, sizeof (cpu
));
73 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
76 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
77 && edx
== signature_INTEL_edx
)
79 unsigned int cpuid
, ignore
;
81 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
84 cpu
.vendor
= CV_INTEL
;
86 cpu
.family
= (cpuid
>> 8) & 0xf;
87 cpu
.model
= (cpuid
>> 4) & 0xf;
89 if (cpu
.family
== 0x6)
90 cpu
.model
+= (cpuid
>> 12) & 0xf0;
98 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
101 perf_event_new_data (const struct perf_event_buffer
*pev
)
103 return *pev
->data_head
!= pev
->last_head
;
106 /* Try to determine the size of a pointer in bits for the OS.
108 This is the same as the size of a pointer for the inferior process
109 except when a 32-bit inferior is running on a 64-bit OS. */
111 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
112 to the memory holding the copy.
113 The caller is responsible for freeing the memory. */
116 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
119 const gdb_byte
*begin
, *end
, *start
, *stop
;
127 gdb_assert (size
<= data_head
);
128 data_tail
= data_head
- size
;
130 buffer_size
= pev
->size
;
132 start
= begin
+ data_tail
% buffer_size
;
133 stop
= begin
+ data_head
% buffer_size
;
135 buffer
= (gdb_byte
*) xmalloc (size
);
138 memcpy (buffer
, start
, stop
- start
);
141 end
= begin
+ buffer_size
;
143 memcpy (buffer
, start
, end
- start
);
144 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
150 /* Copy the perf event buffer data from PEV.
151 Store a pointer to the copy into DATA and its size in SIZE. */
154 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
160 data_head
= *pev
->data_head
;
163 if (data_head
< size
)
164 size
= (size_t) data_head
;
166 *data
= perf_event_read (pev
, data_head
, size
);
169 pev
->last_head
= data_head
;
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */
static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The intel_pt PMU publishes its dynamic perf_event type here.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the start address of the Linux kernel.  */
static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
	break;

      /* Look for the "_text" text-section symbol marking the kernel
	 start address.  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
	continue;

      if (strcmp (symbol, "_text") == 0)
	{
	  kernel_start = addr;
	  break;
	}
    }

  fclose (file);

  return kernel_start;
}
/* Check whether an address is in the kernel.  */
static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
254 /* Check whether a perf event record should be skipped. */
257 perf_event_skip_bts_record (const struct perf_event_bts
*bts
)
259 /* The hardware may report branches from kernel into user space. Branches
260 from user into kernel space will be suppressed. We filter the former to
261 provide a consistent branch trace excluding kernel. */
262 return perf_event_is_kernel_addr (bts
->from
);
265 /* Perform a few consistency checks on a perf event sample record. This is
266 meant to catch cases when we get out of sync with the perf event stream. */
269 perf_event_sample_ok (const struct perf_event_sample
*sample
)
271 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
274 if (sample
->header
.size
!= sizeof (*sample
))
280 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
281 and to addresses (plus a header).
283 Start points into that buffer at the next sample position.
284 We read the collected samples backwards from start.
286 While reading the samples, we convert the information into a list of blocks.
287 For two adjacent samples s1 and s2, we form a block b such that b.begin =
288 s1.to and b.end = s2.from.
290 In case the buffer overflows during sampling, one sample may have its lower
291 part at the end and its upper part at the beginning of the buffer. */
293 static VEC (btrace_block_s
) *
294 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
295 const uint8_t *end
, const uint8_t *start
, size_t size
)
297 VEC (btrace_block_s
) *btrace
= NULL
;
298 struct perf_event_sample sample
;
300 struct btrace_block block
= { 0, 0 };
301 struct regcache
*regcache
;
303 gdb_assert (begin
<= start
);
304 gdb_assert (start
<= end
);
306 /* The first block ends at the current pc. */
307 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
308 block
.end
= regcache_read_pc (regcache
);
310 /* The buffer may contain a partial record as its last entry (i.e. when the
311 buffer size is not a multiple of the sample size). */
312 read
= sizeof (sample
) - 1;
314 for (; read
< size
; read
+= sizeof (sample
))
316 const struct perf_event_sample
*psample
;
318 /* Find the next perf_event sample in a backwards traversal. */
319 start
-= sizeof (sample
);
321 /* If we're still inside the buffer, we're done. */
323 psample
= (const struct perf_event_sample
*) start
;
328 /* We're to the left of the ring buffer, we will wrap around and
329 reappear at the very right of the ring buffer. */
331 missing
= (begin
- start
);
332 start
= (end
- missing
);
334 /* If the entire sample is missing, we're done. */
335 if (missing
== sizeof (sample
))
336 psample
= (const struct perf_event_sample
*) start
;
341 /* The sample wrapped around. The lower part is at the end and
342 the upper part is at the beginning of the buffer. */
343 stack
= (uint8_t *) &sample
;
345 /* Copy the two parts so we have a contiguous sample. */
346 memcpy (stack
, start
, missing
);
347 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
353 if (!perf_event_sample_ok (psample
))
355 warning (_("Branch trace may be incomplete."));
359 if (perf_event_skip_bts_record (&psample
->bts
))
362 /* We found a valid sample, so we can complete the current block. */
363 block
.begin
= psample
->bts
.to
;
365 VEC_safe_push (btrace_block_s
, btrace
, &block
);
367 /* Start the next block. */
368 block
.end
= psample
->bts
.from
;
371 /* Push the last block (i.e. the first one of inferior execution), as well.
372 We don't know where it ends, but we know where it starts. If we're
373 reading delta trace, we can fill in the start address later on.
374 Otherwise we will prune it. */
376 VEC_safe_push (btrace_block_s
, btrace
, &block
);
381 /* Check whether the kernel supports BTS. */
384 kernel_supports_bts (void)
386 struct perf_event_attr attr
;
395 warning (_("test bts: cannot fork: %s."), safe_strerror (errno
));
399 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
402 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
403 safe_strerror (errno
));
407 status
= raise (SIGTRAP
);
410 warning (_("test bts: cannot raise SIGTRAP: %s."),
411 safe_strerror (errno
));
418 pid
= waitpid (child
, &status
, 0);
421 warning (_("test bts: bad pid %ld, error: %s."),
422 (long) pid
, safe_strerror (errno
));
426 if (!WIFSTOPPED (status
))
428 warning (_("test bts: expected stop. status: %d."),
433 memset (&attr
, 0, sizeof (attr
));
435 attr
.type
= PERF_TYPE_HARDWARE
;
436 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
437 attr
.sample_period
= 1;
438 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
439 attr
.exclude_kernel
= 1;
441 attr
.exclude_idle
= 1;
443 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
447 kill (child
, SIGKILL
);
448 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
450 pid
= waitpid (child
, &status
, 0);
453 warning (_("test bts: bad pid %ld, error: %s."),
454 (long) pid
, safe_strerror (errno
));
455 if (!WIFSIGNALED (status
))
456 warning (_("test bts: expected killed. status: %d."),
464 /* Check whether the kernel supports Intel(R) Processor Trace. */
467 kernel_supports_pt (void)
469 struct perf_event_attr attr
;
471 int status
, file
, type
;
478 warning (_("test pt: cannot fork: %s."), safe_strerror (errno
));
482 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
485 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
486 safe_strerror (errno
));
490 status
= raise (SIGTRAP
);
493 warning (_("test pt: cannot raise SIGTRAP: %s."),
494 safe_strerror (errno
));
501 pid
= waitpid (child
, &status
, 0);
504 warning (_("test pt: bad pid %ld, error: %s."),
505 (long) pid
, safe_strerror (errno
));
509 if (!WIFSTOPPED (status
))
511 warning (_("test pt: expected stop. status: %d."),
516 status
= perf_event_pt_event_type (&type
);
521 memset (&attr
, 0, sizeof (attr
));
523 attr
.size
= sizeof (attr
);
525 attr
.exclude_kernel
= 1;
527 attr
.exclude_idle
= 1;
529 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
534 kill (child
, SIGKILL
);
535 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
537 pid
= waitpid (child
, &status
, 0);
540 warning (_("test pt: bad pid %ld, error: %s."),
541 (long) pid
, safe_strerror (errno
));
542 if (!WIFSIGNALED (status
))
543 warning (_("test pt: expected killed. status: %d."),
551 /* Check whether an Intel cpu supports BTS. */
554 intel_supports_bts (const struct btrace_cpu
*cpu
)
561 case 0x1a: /* Nehalem */
565 case 0x25: /* Westmere */
568 case 0x2a: /* Sandy Bridge */
570 case 0x3a: /* Ivy Bridge */
572 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
573 "from" information afer an EIST transition, T-states, C1E, or
574 Adaptive Thermal Throttling. */
582 /* Check whether the cpu supports BTS. */
585 cpu_supports_bts (void)
587 struct btrace_cpu cpu
;
589 cpu
= btrace_this_cpu ();
593 /* Don't know about others. Let's assume they do. */
597 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.  */
static int
linux_supports_bts (void)
{
  static int cached;

  /* Probing involves fork/ptrace; do it only once and cache the result
     (1 = supported, -1 = not supported, 0 = not probed yet).  */
  if (cached == 0)
    {
      if (!kernel_supports_bts ())
	cached = -1;
      else if (!cpu_supports_bts ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel(R) Processor Trace.  */
static int
linux_supports_pt (void)
{
  static int cached;

  /* Probing involves fork/ptrace; do it only once and cache the result
     (1 = supported, -1 = not supported, 0 = not probed yet).  */
  if (cached == 0)
    {
      if (!kernel_supports_pt ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
639 /* See linux-btrace.h. */
642 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
646 case BTRACE_FORMAT_NONE
:
649 case BTRACE_FORMAT_BTS
:
650 return linux_supports_bts ();
652 case BTRACE_FORMAT_PT
:
653 return linux_supports_pt ();
656 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
659 /* Enable branch tracing in BTS format. */
661 static struct btrace_target_info
*
662 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
664 struct perf_event_mmap_page
*header
;
665 struct btrace_target_info
*tinfo
;
666 struct btrace_tinfo_bts
*bts
;
671 tinfo
= XCNEW (struct btrace_target_info
);
674 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
675 bts
= &tinfo
->variant
.bts
;
677 bts
->attr
.size
= sizeof (bts
->attr
);
678 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
679 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
680 bts
->attr
.sample_period
= 1;
682 /* We sample from and to address. */
683 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
685 bts
->attr
.exclude_kernel
= 1;
686 bts
->attr
.exclude_hv
= 1;
687 bts
->attr
.exclude_idle
= 1;
689 pid
= ptid_get_lwp (ptid
);
691 pid
= ptid_get_pid (ptid
);
694 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
698 /* Convert the requested size in bytes to pages (rounding up). */
699 pages
= ((size_t) conf
->size
/ PAGE_SIZE
700 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
701 /* We need at least one page. */
705 /* The buffer size can be requested in powers of two pages. Adjust PAGES
706 to the next power of two. */
707 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
708 if ((pages
& ((size_t) 1 << pg
)) != 0)
709 pages
+= ((size_t) 1 << pg
);
711 /* We try to allocate the requested size.
712 If that fails, try to get as much as we can. */
713 for (; pages
> 0; pages
>>= 1)
718 data_size
= (__u64
) pages
* PAGE_SIZE
;
720 /* Don't ask for more than we can represent in the configuration. */
721 if ((__u64
) UINT_MAX
< data_size
)
724 size
= (size_t) data_size
;
725 length
= size
+ PAGE_SIZE
;
727 /* Check for overflows. */
728 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
731 /* The number of pages we request needs to be a power of two. */
732 header
= ((struct perf_event_mmap_page
*)
733 mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0));
734 if (header
!= MAP_FAILED
)
741 data_offset
= PAGE_SIZE
;
743 #if defined (PERF_ATTR_SIZE_VER5)
744 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
748 data_offset
= header
->data_offset
;
749 data_size
= header
->data_size
;
751 size
= (unsigned int) data_size
;
753 /* Check for overflows. */
754 if ((__u64
) size
!= data_size
)
756 munmap ((void *) header
, size
+ PAGE_SIZE
);
760 #endif /* defined (PERF_ATTR_SIZE_VER5) */
762 bts
->header
= header
;
763 bts
->bts
.mem
= ((const uint8_t *) header
) + data_offset
;
764 bts
->bts
.size
= size
;
765 bts
->bts
.data_head
= &header
->data_head
;
766 bts
->bts
.last_head
= 0ull;
768 tinfo
->conf
.bts
.size
= (unsigned int) size
;
772 /* We were not able to allocate any buffer. */
780 #if defined (PERF_ATTR_SIZE_VER5)
782 /* Enable branch tracing in Intel(R) Processor Trace format. */
784 static struct btrace_target_info
*
785 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
787 struct perf_event_mmap_page
*header
;
788 struct btrace_target_info
*tinfo
;
789 struct btrace_tinfo_pt
*pt
;
791 int pid
, pg
, errcode
, type
;
796 errcode
= perf_event_pt_event_type (&type
);
800 pid
= ptid_get_lwp (ptid
);
802 pid
= ptid_get_pid (ptid
);
804 tinfo
= XCNEW (struct btrace_target_info
);
807 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
808 pt
= &tinfo
->variant
.pt
;
810 pt
->attr
.size
= sizeof (pt
->attr
);
811 pt
->attr
.type
= type
;
813 pt
->attr
.exclude_kernel
= 1;
814 pt
->attr
.exclude_hv
= 1;
815 pt
->attr
.exclude_idle
= 1;
818 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
822 /* Allocate the configuration page. */
823 header
= mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
825 if (header
== MAP_FAILED
)
828 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
830 /* Convert the requested size in bytes to pages (rounding up). */
831 pages
= ((size_t) conf
->size
/ PAGE_SIZE
832 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
833 /* We need at least one page. */
837 /* The buffer size can be requested in powers of two pages. Adjust PAGES
838 to the next power of two. */
839 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
840 if ((pages
& ((size_t) 1 << pg
)) != 0)
841 pages
+= ((size_t) 1 << pg
);
843 /* We try to allocate the requested size.
844 If that fails, try to get as much as we can. */
845 for (; pages
> 0; pages
>>= 1)
850 data_size
= (__u64
) pages
* PAGE_SIZE
;
852 /* Don't ask for more than we can represent in the configuration. */
853 if ((__u64
) UINT_MAX
< data_size
)
856 size
= (size_t) data_size
;
858 /* Check for overflows. */
859 if ((__u64
) size
!= data_size
)
862 header
->aux_size
= data_size
;
865 pt
->pt
.mem
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
867 if (pt
->pt
.mem
!= MAP_FAILED
)
876 pt
->pt
.data_head
= &header
->aux_head
;
878 tinfo
->conf
.pt
.size
= (unsigned int) size
;
882 munmap((void *) header
, PAGE_SIZE
);
892 #else /* !defined (PERF_ATTR_SIZE_VER5) */
894 static struct btrace_target_info
*
895 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
901 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
903 /* See linux-btrace.h. */
905 struct btrace_target_info
*
906 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
908 struct btrace_target_info
*tinfo
;
911 switch (conf
->format
)
913 case BTRACE_FORMAT_NONE
:
916 case BTRACE_FORMAT_BTS
:
917 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
920 case BTRACE_FORMAT_PT
:
921 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
928 /* Disable BTS tracing. */
930 static enum btrace_error
931 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
933 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
936 return BTRACE_ERR_NONE
;
939 /* Disable Intel(R) Processor Trace tracing. */
941 static enum btrace_error
942 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
944 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
945 munmap((void *) tinfo
->header
, PAGE_SIZE
);
948 return BTRACE_ERR_NONE
;
951 /* See linux-btrace.h. */
954 linux_disable_btrace (struct btrace_target_info
*tinfo
)
956 enum btrace_error errcode
;
958 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
959 switch (tinfo
->conf
.format
)
961 case BTRACE_FORMAT_NONE
:
964 case BTRACE_FORMAT_BTS
:
965 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
968 case BTRACE_FORMAT_PT
:
969 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
973 if (errcode
== BTRACE_ERR_NONE
)
979 /* Read branch trace data in BTS format for the thread given by TINFO into
980 BTRACE using the TYPE reading method. */
982 static enum btrace_error
983 linux_read_bts (struct btrace_data_bts
*btrace
,
984 struct btrace_target_info
*tinfo
,
985 enum btrace_read_type type
)
987 struct perf_event_buffer
*pevent
;
988 const uint8_t *begin
, *end
, *start
;
989 size_t buffer_size
, size
;
990 __u64 data_head
, data_tail
;
991 unsigned int retries
= 5;
993 pevent
= &tinfo
->variant
.bts
.bts
;
995 /* For delta reads, we return at least the partial last block containing
997 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
998 return BTRACE_ERR_NONE
;
1000 buffer_size
= pevent
->size
;
1001 data_tail
= pevent
->last_head
;
1003 /* We may need to retry reading the trace. See below. */
1006 data_head
= *pevent
->data_head
;
1008 /* Delete any leftover trace from the previous iteration. */
1009 VEC_free (btrace_block_s
, btrace
->blocks
);
1011 if (type
== BTRACE_READ_DELTA
)
1015 /* Determine the number of bytes to read and check for buffer
1018 /* Check for data head overflows. We might be able to recover from
1019 those but they are very unlikely and it's not really worth the
1021 if (data_head
< data_tail
)
1022 return BTRACE_ERR_OVERFLOW
;
1024 /* If the buffer is smaller than the trace delta, we overflowed. */
1025 data_size
= data_head
- data_tail
;
1026 if (buffer_size
< data_size
)
1027 return BTRACE_ERR_OVERFLOW
;
1029 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1030 size
= (size_t) data_size
;
1034 /* Read the entire buffer. */
1037 /* Adjust the size if the buffer has not overflowed, yet. */
1038 if (data_head
< size
)
1039 size
= (size_t) data_head
;
1042 /* Data_head keeps growing; the buffer itself is circular. */
1043 begin
= pevent
->mem
;
1044 start
= begin
+ data_head
% buffer_size
;
1046 if (data_head
<= buffer_size
)
1049 end
= begin
+ pevent
->size
;
1051 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
1053 /* The stopping thread notifies its ptracer before it is scheduled out.
1054 On multi-core systems, the debugger might therefore run while the
1055 kernel might be writing the last branch trace records.
1057 Let's check whether the data head moved while we read the trace. */
1058 if (data_head
== *pevent
->data_head
)
1062 pevent
->last_head
= data_head
;
1064 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1065 if we're not doing a delta read. There is no way of filling in its zeroed
1067 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1068 && type
!= BTRACE_READ_DELTA
)
1069 VEC_pop (btrace_block_s
, btrace
->blocks
);
1071 return BTRACE_ERR_NONE
;
1074 /* Fill in the Intel(R) Processor Trace configuration information. */
1077 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1079 conf
->cpu
= btrace_this_cpu ();
1082 /* Read branch trace data in Intel(R) Processor Trace format for the thread
1083 given by TINFO into BTRACE using the TYPE reading method. */
1085 static enum btrace_error
1086 linux_read_pt (struct btrace_data_pt
*btrace
,
1087 struct btrace_target_info
*tinfo
,
1088 enum btrace_read_type type
)
1090 struct perf_event_buffer
*pt
;
1092 pt
= &tinfo
->variant
.pt
.pt
;
1094 linux_fill_btrace_pt_config (&btrace
->config
);
1098 case BTRACE_READ_DELTA
:
1099 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1100 around to stay inside the aux buffer. */
1101 return BTRACE_ERR_NOT_SUPPORTED
;
1103 case BTRACE_READ_NEW
:
1104 if (!perf_event_new_data (pt
))
1105 return BTRACE_ERR_NONE
;
1108 case BTRACE_READ_ALL
:
1109 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1110 return BTRACE_ERR_NONE
;
1113 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1116 /* See linux-btrace.h. */
1119 linux_read_btrace (struct btrace_data
*btrace
,
1120 struct btrace_target_info
*tinfo
,
1121 enum btrace_read_type type
)
1123 switch (tinfo
->conf
.format
)
1125 case BTRACE_FORMAT_NONE
:
1126 return BTRACE_ERR_NOT_SUPPORTED
;
1128 case BTRACE_FORMAT_BTS
:
1129 /* We read btrace in BTS format. */
1130 btrace
->format
= BTRACE_FORMAT_BTS
;
1131 btrace
->variant
.bts
.blocks
= NULL
;
1133 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1135 case BTRACE_FORMAT_PT
:
1136 /* We read btrace in Intel(R) Processor Trace format. */
1137 btrace
->format
= BTRACE_FORMAT_PT
;
1138 btrace
->variant
.pt
.data
= NULL
;
1139 btrace
->variant
.pt
.size
= 0;
1141 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1144 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1147 /* See linux-btrace.h. */
1149 const struct btrace_config
*
1150 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1152 return &tinfo
->conf
;
1155 #else /* !HAVE_LINUX_PERF_EVENT_H */
1157 /* See linux-btrace.h. */
1160 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1165 /* See linux-btrace.h. */
1167 struct btrace_target_info
*
1168 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1173 /* See linux-btrace.h. */
1176 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1178 return BTRACE_ERR_NOT_SUPPORTED
;
1181 /* See linux-btrace.h. */
1184 linux_read_btrace (struct btrace_data
*btrace
,
1185 struct btrace_target_info
*tinfo
,
1186 enum btrace_read_type type
)
1188 return BTRACE_ERR_NOT_SUPPORTED
;
1191 /* See linux-btrace.h. */
1193 const struct btrace_config
*
1194 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1199 #endif /* !HAVE_LINUX_PERF_EVENT_H */