1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
28 #ifdef HAVE_SYS_SYSCALL_H
29 #include <sys/syscall.h>
32 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include <sys/ptrace.h>
37 #include <sys/types.h>
39 #include <sys/utsname.h>
41 /* A branch trace record in perf_event. */
44 /* The linear address of the branch source. */
47 /* The linear address of the branch destination. */
51 /* A perf_event branch trace sample. */
52 struct perf_event_sample
54 /* The perf_event sample header. */
55 struct perf_event_header header
;
57 /* The perf_event branch tracing payload. */
58 struct perf_event_bts bts
;
61 /* Identify the cpu we're running on. */
62 static struct btrace_cpu
63 btrace_this_cpu (void)
65 struct btrace_cpu cpu
;
66 unsigned int eax
, ebx
, ecx
, edx
;
69 memset (&cpu
, 0, sizeof (cpu
));
71 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
74 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
75 && edx
== signature_INTEL_edx
)
77 unsigned int cpuid
, ignore
;
79 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
82 cpu
.vendor
= CV_INTEL
;
84 cpu
.family
= (cpuid
>> 8) & 0xf;
85 cpu
.model
= (cpuid
>> 4) & 0xf;
87 if (cpu
.family
== 0x6)
88 cpu
.model
+= (cpuid
>> 12) & 0xf0;
96 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
99 perf_event_new_data (const struct perf_event_buffer
*pev
)
101 return *pev
->data_head
!= pev
->last_head
;
104 /* Try to determine the size of a pointer in bits for the OS.
106 This is the same as the size of a pointer for the inferior process
107 except when a 32-bit inferior is running on a 64-bit OS. */
109 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
110 to the memory holding the copy.
111 The caller is responsible for freeing the memory. */
114 perf_event_read (const struct perf_event_buffer
*pev
, unsigned long data_head
,
117 const gdb_byte
*begin
, *end
, *start
, *stop
;
119 unsigned long data_tail
, buffer_size
;
124 gdb_assert (size
<= data_head
);
125 data_tail
= data_head
- size
;
127 buffer_size
= pev
->size
;
129 start
= begin
+ data_tail
% buffer_size
;
130 stop
= begin
+ data_head
% buffer_size
;
132 buffer
= xmalloc (size
);
135 memcpy (buffer
, start
, stop
- start
);
138 end
= begin
+ buffer_size
;
140 memcpy (buffer
, start
, end
- start
);
141 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
147 /* Copy the perf event buffer data from PEV.
148 Store a pointer to the copy into DATA and its size in SIZE. */
151 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
152 unsigned long *psize
)
154 unsigned long data_head
, size
;
156 data_head
= *pev
->data_head
;
159 if (data_head
< size
)
162 *data
= perf_event_read (pev
, data_head
, size
);
165 pev
->last_head
= data_head
;
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */
static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The kernel exports the dynamically assigned perf event type for
     Intel Processor Trace via sysfs.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Determine the size of a pointer in bits for the running kernel.
   Returns 64 on a 64-bit x86 kernel, zero when the size is unknown.  */
static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;
  int errcode;

  memset (&utsn, 0, sizeof (utsn));

  errcode = uname (&utsn);
  if (errcode < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}
210 /* Check whether an address is in the kernel. */
213 perf_event_is_kernel_addr (const struct btrace_target_info
*tinfo
,
218 /* If we don't know the size of a pointer, we can't check. Let's assume it's
219 not a kernel address in this case. */
220 if (tinfo
->ptr_bits
== 0)
223 /* A bit mask for the most significant bit in an address. */
224 mask
= (uint64_t) 1 << (tinfo
->ptr_bits
- 1);
226 /* Check whether the most significant bit in the address is set. */
227 return (addr
& mask
) != 0;
230 /* Check whether a perf event record should be skipped. */
233 perf_event_skip_bts_record (const struct btrace_target_info
*tinfo
,
234 const struct perf_event_bts
*bts
)
236 /* The hardware may report branches from kernel into user space. Branches
237 from user into kernel space will be suppressed. We filter the former to
238 provide a consistent branch trace excluding kernel. */
239 return perf_event_is_kernel_addr (tinfo
, bts
->from
);
242 /* Perform a few consistency checks on a perf event sample record. This is
243 meant to catch cases when we get out of sync with the perf event stream. */
246 perf_event_sample_ok (const struct perf_event_sample
*sample
)
248 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
251 if (sample
->header
.size
!= sizeof (*sample
))
257 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
258 and to addresses (plus a header).
260 Start points into that buffer at the next sample position.
261 We read the collected samples backwards from start.
263 While reading the samples, we convert the information into a list of blocks.
264 For two adjacent samples s1 and s2, we form a block b such that b.begin =
265 s1.to and b.end = s2.from.
267 In case the buffer overflows during sampling, one sample may have its lower
268 part at the end and its upper part at the beginning of the buffer. */
270 static VEC (btrace_block_s
) *
271 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
272 const uint8_t *end
, const uint8_t *start
,
273 unsigned long long size
)
275 VEC (btrace_block_s
) *btrace
= NULL
;
276 struct perf_event_sample sample
;
277 unsigned long long read
= 0;
278 struct btrace_block block
= { 0, 0 };
279 struct regcache
*regcache
;
281 gdb_assert (begin
<= start
);
282 gdb_assert (start
<= end
);
284 /* The first block ends at the current pc. */
285 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
286 block
.end
= regcache_read_pc (regcache
);
288 /* The buffer may contain a partial record as its last entry (i.e. when the
289 buffer size is not a multiple of the sample size). */
290 read
= sizeof (sample
) - 1;
292 for (; read
< size
; read
+= sizeof (sample
))
294 const struct perf_event_sample
*psample
;
296 /* Find the next perf_event sample in a backwards traversal. */
297 start
-= sizeof (sample
);
299 /* If we're still inside the buffer, we're done. */
301 psample
= (const struct perf_event_sample
*) start
;
306 /* We're to the left of the ring buffer, we will wrap around and
307 reappear at the very right of the ring buffer. */
309 missing
= (begin
- start
);
310 start
= (end
- missing
);
312 /* If the entire sample is missing, we're done. */
313 if (missing
== sizeof (sample
))
314 psample
= (const struct perf_event_sample
*) start
;
319 /* The sample wrapped around. The lower part is at the end and
320 the upper part is at the beginning of the buffer. */
321 stack
= (uint8_t *) &sample
;
323 /* Copy the two parts so we have a contiguous sample. */
324 memcpy (stack
, start
, missing
);
325 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
331 if (!perf_event_sample_ok (psample
))
333 warning (_("Branch trace may be incomplete."));
337 if (perf_event_skip_bts_record (tinfo
, &psample
->bts
))
340 /* We found a valid sample, so we can complete the current block. */
341 block
.begin
= psample
->bts
.to
;
343 VEC_safe_push (btrace_block_s
, btrace
, &block
);
345 /* Start the next block. */
346 block
.end
= psample
->bts
.from
;
349 /* Push the last block (i.e. the first one of inferior execution), as well.
350 We don't know where it ends, but we know where it starts. If we're
351 reading delta trace, we can fill in the start address later on.
352 Otherwise we will prune it. */
354 VEC_safe_push (btrace_block_s
, btrace
, &block
);
359 /* Check whether the kernel supports BTS. */
362 kernel_supports_bts (void)
364 struct perf_event_attr attr
;
373 warning (_("test bts: cannot fork: %s."), strerror (errno
));
377 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
380 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
385 status
= raise (SIGTRAP
);
388 warning (_("test bts: cannot raise SIGTRAP: %s."),
396 pid
= waitpid (child
, &status
, 0);
399 warning (_("test bts: bad pid %ld, error: %s."),
400 (long) pid
, strerror (errno
));
404 if (!WIFSTOPPED (status
))
406 warning (_("test bts: expected stop. status: %d."),
411 memset (&attr
, 0, sizeof (attr
));
413 attr
.type
= PERF_TYPE_HARDWARE
;
414 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
415 attr
.sample_period
= 1;
416 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
417 attr
.exclude_kernel
= 1;
419 attr
.exclude_idle
= 1;
421 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
425 kill (child
, SIGKILL
);
426 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
428 pid
= waitpid (child
, &status
, 0);
431 warning (_("test bts: bad pid %ld, error: %s."),
432 (long) pid
, strerror (errno
));
433 if (!WIFSIGNALED (status
))
434 warning (_("test bts: expected killed. status: %d."),
442 /* Check whether the kernel supports Intel(R) Processor Trace. */
445 kernel_supports_pt (void)
447 struct perf_event_attr attr
;
449 int status
, file
, type
;
456 warning (_("test pt: cannot fork: %s."), strerror (errno
));
460 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
463 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
468 status
= raise (SIGTRAP
);
471 warning (_("test pt: cannot raise SIGTRAP: %s."),
479 pid
= waitpid (child
, &status
, 0);
482 warning (_("test pt: bad pid %ld, error: %s."),
483 (long) pid
, strerror (errno
));
487 if (!WIFSTOPPED (status
))
489 warning (_("test pt: expected stop. status: %d."),
494 status
= perf_event_pt_event_type (&type
);
499 memset (&attr
, 0, sizeof (attr
));
501 attr
.size
= sizeof (attr
);
503 attr
.exclude_kernel
= 1;
505 attr
.exclude_idle
= 1;
507 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
512 kill (child
, SIGKILL
);
513 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
515 pid
= waitpid (child
, &status
, 0);
518 warning (_("test pt: bad pid %ld, error: %s."),
519 (long) pid
, strerror (errno
));
520 if (!WIFSIGNALED (status
))
521 warning (_("test pt: expected killed. status: %d."),
529 /* Check whether an Intel cpu supports BTS. */
532 intel_supports_bts (const struct btrace_cpu
*cpu
)
539 case 0x1a: /* Nehalem */
543 case 0x25: /* Westmere */
546 case 0x2a: /* Sandy Bridge */
548 case 0x3a: /* Ivy Bridge */
550 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
551 "from" information afer an EIST transition, T-states, C1E, or
552 Adaptive Thermal Throttling. */
560 /* Check whether the cpu supports BTS. */
563 cpu_supports_bts (void)
565 struct btrace_cpu cpu
;
567 cpu
= btrace_this_cpu ();
571 /* Don't know about others. Let's assume they do. */
575 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.  */
static int
linux_supports_bts (void)
{
  /* Cache the probe result: 0 = unknown, 1 = supported, -1 = unsupported.  */
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel(R) Processor Trace.  */
static int
linux_supports_pt (void)
{
  /* Cache the probe result: 0 = unknown, 1 = supported, -1 = unsupported.  */
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
617 /* See linux-btrace.h. */
620 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
624 case BTRACE_FORMAT_NONE
:
627 case BTRACE_FORMAT_BTS
:
628 return linux_supports_bts ();
630 case BTRACE_FORMAT_PT
:
631 return linux_supports_pt ();
634 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
637 /* Enable branch tracing in BTS format. */
639 static struct btrace_target_info
*
640 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
642 struct perf_event_mmap_page
*header
;
643 struct btrace_target_info
*tinfo
;
644 struct btrace_tinfo_bts
*bts
;
645 unsigned long long size
, pages
;
648 tinfo
= xzalloc (sizeof (*tinfo
));
650 tinfo
->ptr_bits
= linux_determine_kernel_ptr_bits ();
652 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
653 bts
= &tinfo
->variant
.bts
;
655 bts
->attr
.size
= sizeof (bts
->attr
);
656 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
657 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
658 bts
->attr
.sample_period
= 1;
660 /* We sample from and to address. */
661 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
663 bts
->attr
.exclude_kernel
= 1;
664 bts
->attr
.exclude_hv
= 1;
665 bts
->attr
.exclude_idle
= 1;
667 pid
= ptid_get_lwp (ptid
);
669 pid
= ptid_get_pid (ptid
);
672 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
676 /* Convert the requested size in bytes to pages (rounding up). */
677 pages
= (((unsigned long long) conf
->size
) + PAGE_SIZE
- 1) / PAGE_SIZE
;
678 /* We need at least one page. */
682 /* The buffer size can be requested in powers of two pages. Adjust PAGES
683 to the next power of two. */
684 for (pg
= 0; pages
!= (1u << pg
); ++pg
)
685 if ((pages
& (1u << pg
)) != 0)
688 /* We try to allocate the requested size.
689 If that fails, try to get as much as we can. */
690 for (; pages
> 0; pages
>>= 1)
694 size
= pages
* PAGE_SIZE
;
695 length
= size
+ PAGE_SIZE
;
697 /* Check for overflows. */
698 if ((unsigned long long) length
< size
)
701 /* The number of pages we request needs to be a power of two. */
702 header
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0);
703 if (header
!= MAP_FAILED
)
707 if (header
== MAP_FAILED
)
710 bts
->header
= header
;
711 bts
->bts
.mem
= ((const uint8_t *) header
) + PAGE_SIZE
;
712 bts
->bts
.size
= size
;
713 bts
->bts
.data_head
= &header
->data_head
;
714 bts
->bts
.last_head
= 0;
716 tinfo
->conf
.bts
.size
= size
;
720 /* We were not able to allocate any buffer. */
728 #if defined (PERF_ATTR_SIZE_VER5)
730 /* Enable branch tracing in Intel(R) Processor Trace format. */
732 static struct btrace_target_info
*
733 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
735 struct perf_event_mmap_page
*header
;
736 struct btrace_target_info
*tinfo
;
737 struct btrace_tinfo_pt
*pt
;
738 unsigned long long pages
, size
;
739 int pid
, pg
, errcode
, type
;
744 errcode
= perf_event_pt_event_type (&type
);
748 pid
= ptid_get_lwp (ptid
);
750 pid
= ptid_get_pid (ptid
);
752 tinfo
= xzalloc (sizeof (*tinfo
));
756 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
757 pt
= &tinfo
->variant
.pt
;
759 pt
->attr
.size
= sizeof (pt
->attr
);
760 pt
->attr
.type
= type
;
762 pt
->attr
.exclude_kernel
= 1;
763 pt
->attr
.exclude_hv
= 1;
764 pt
->attr
.exclude_idle
= 1;
767 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
771 /* Allocate the configuration page. */
772 header
= mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
774 if (header
== MAP_FAILED
)
777 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
779 /* Convert the requested size in bytes to pages (rounding up). */
780 pages
= (((unsigned long long) conf
->size
) + PAGE_SIZE
- 1) / PAGE_SIZE
;
781 /* We need at least one page. */
785 /* The buffer size can be requested in powers of two pages. Adjust PAGES
786 to the next power of two. */
787 for (pg
= 0; pages
!= (1u << pg
); ++pg
)
788 if ((pages
& (1u << pg
)) != 0)
791 /* We try to allocate the requested size.
792 If that fails, try to get as much as we can. */
793 for (; pages
> 0; pages
>>= 1)
797 size
= pages
* PAGE_SIZE
;
800 /* Check for overflows. */
801 if ((unsigned long long) length
< size
)
804 header
->aux_size
= size
;
806 pt
->pt
.mem
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
808 if (pt
->pt
.mem
!= MAP_FAILED
)
817 pt
->pt
.data_head
= &header
->aux_head
;
819 tinfo
->conf
.pt
.size
= size
;
823 munmap((void *) header
, PAGE_SIZE
);
833 #else /* !defined (PERF_ATTR_SIZE_VER5) */
835 static struct btrace_target_info
*
836 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
842 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
844 /* See linux-btrace.h. */
846 struct btrace_target_info
*
847 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
849 struct btrace_target_info
*tinfo
;
852 switch (conf
->format
)
854 case BTRACE_FORMAT_NONE
:
857 case BTRACE_FORMAT_BTS
:
858 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
861 case BTRACE_FORMAT_PT
:
862 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
869 /* Disable BTS tracing. */
871 static enum btrace_error
872 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
874 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
877 return BTRACE_ERR_NONE
;
880 /* Disable Intel(R) Processor Trace tracing. */
882 static enum btrace_error
883 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
885 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
886 munmap((void *) tinfo
->header
, PAGE_SIZE
);
889 return BTRACE_ERR_NONE
;
892 /* See linux-btrace.h. */
895 linux_disable_btrace (struct btrace_target_info
*tinfo
)
897 enum btrace_error errcode
;
899 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
900 switch (tinfo
->conf
.format
)
902 case BTRACE_FORMAT_NONE
:
905 case BTRACE_FORMAT_BTS
:
906 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
909 case BTRACE_FORMAT_PT
:
910 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
914 if (errcode
== BTRACE_ERR_NONE
)
920 /* Read branch trace data in BTS format for the thread given by TINFO into
921 BTRACE using the TYPE reading method. */
923 static enum btrace_error
924 linux_read_bts (struct btrace_data_bts
*btrace
,
925 struct btrace_target_info
*tinfo
,
926 enum btrace_read_type type
)
928 struct perf_event_buffer
*pevent
;
929 const uint8_t *begin
, *end
, *start
;
930 unsigned long long data_head
, data_tail
, buffer_size
, size
;
931 unsigned int retries
= 5;
933 pevent
= &tinfo
->variant
.bts
.bts
;
935 /* For delta reads, we return at least the partial last block containing
937 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
938 return BTRACE_ERR_NONE
;
940 buffer_size
= pevent
->size
;
941 data_tail
= pevent
->last_head
;
943 /* We may need to retry reading the trace. See below. */
946 data_head
= *pevent
->data_head
;
948 /* Delete any leftover trace from the previous iteration. */
949 VEC_free (btrace_block_s
, btrace
->blocks
);
951 if (type
== BTRACE_READ_DELTA
)
953 /* Determine the number of bytes to read and check for buffer
956 /* Check for data head overflows. We might be able to recover from
957 those but they are very unlikely and it's not really worth the
959 if (data_head
< data_tail
)
960 return BTRACE_ERR_OVERFLOW
;
962 /* If the buffer is smaller than the trace delta, we overflowed. */
963 size
= data_head
- data_tail
;
964 if (buffer_size
< size
)
965 return BTRACE_ERR_OVERFLOW
;
969 /* Read the entire buffer. */
972 /* Adjust the size if the buffer has not overflowed, yet. */
973 if (data_head
< size
)
977 /* Data_head keeps growing; the buffer itself is circular. */
979 start
= begin
+ data_head
% buffer_size
;
981 if (data_head
<= buffer_size
)
984 end
= begin
+ pevent
->size
;
986 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
988 /* The stopping thread notifies its ptracer before it is scheduled out.
989 On multi-core systems, the debugger might therefore run while the
990 kernel might be writing the last branch trace records.
992 Let's check whether the data head moved while we read the trace. */
993 if (data_head
== *pevent
->data_head
)
997 pevent
->last_head
= data_head
;
999 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1000 if we're not doing a delta read. There is no way of filling in its zeroed
1002 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1003 && type
!= BTRACE_READ_DELTA
)
1004 VEC_pop (btrace_block_s
, btrace
->blocks
);
1006 return BTRACE_ERR_NONE
;
1009 /* Fill in the Intel(R) Processor Trace configuration information. */
1012 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1014 conf
->cpu
= btrace_this_cpu ();
1017 /* Read branch trace data in Intel(R) Processor Trace format for the thread
1018 given by TINFO into BTRACE using the TYPE reading method. */
1020 static enum btrace_error
1021 linux_read_pt (struct btrace_data_pt
*btrace
,
1022 struct btrace_target_info
*tinfo
,
1023 enum btrace_read_type type
)
1025 struct perf_event_buffer
*pt
;
1027 pt
= &tinfo
->variant
.pt
.pt
;
1029 linux_fill_btrace_pt_config (&btrace
->config
);
1033 case BTRACE_READ_DELTA
:
1034 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1035 around to stay inside the aux buffer. */
1036 return BTRACE_ERR_NOT_SUPPORTED
;
1038 case BTRACE_READ_NEW
:
1039 if (!perf_event_new_data (pt
))
1040 return BTRACE_ERR_NONE
;
1043 case BTRACE_READ_ALL
:
1044 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1045 return BTRACE_ERR_NONE
;
1048 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1051 /* See linux-btrace.h. */
1054 linux_read_btrace (struct btrace_data
*btrace
,
1055 struct btrace_target_info
*tinfo
,
1056 enum btrace_read_type type
)
1058 switch (tinfo
->conf
.format
)
1060 case BTRACE_FORMAT_NONE
:
1061 return BTRACE_ERR_NOT_SUPPORTED
;
1063 case BTRACE_FORMAT_BTS
:
1064 /* We read btrace in BTS format. */
1065 btrace
->format
= BTRACE_FORMAT_BTS
;
1066 btrace
->variant
.bts
.blocks
= NULL
;
1068 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1070 case BTRACE_FORMAT_PT
:
1071 /* We read btrace in Intel(R) Processor Trace format. */
1072 btrace
->format
= BTRACE_FORMAT_PT
;
1073 btrace
->variant
.pt
.data
= NULL
;
1074 btrace
->variant
.pt
.size
= 0;
1076 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1079 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1082 /* See linux-btrace.h. */
1084 const struct btrace_config
*
1085 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1087 return &tinfo
->conf
;
1090 #else /* !HAVE_LINUX_PERF_EVENT_H */
1092 /* See linux-btrace.h. */
1095 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1100 /* See linux-btrace.h. */
1102 struct btrace_target_info
*
1103 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1108 /* See linux-btrace.h. */
1111 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1113 return BTRACE_ERR_NOT_SUPPORTED
;
1116 /* See linux-btrace.h. */
1119 linux_read_btrace (struct btrace_data
*btrace
,
1120 struct btrace_target_info
*tinfo
,
1121 enum btrace_read_type type
)
1123 return BTRACE_ERR_NOT_SUPPORTED
;
1126 /* See linux-btrace.h. */
1128 const struct btrace_config
*
1129 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1134 #endif /* !HAVE_LINUX_PERF_EVENT_H */