1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "common/common-defs.h"
#include "linux-btrace.h"
#include "common/common-regcache.h"
#include "common/gdb_wait.h"
#include "x86-cpuid.h"
#include "common/filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>
43 /* A branch trace record in perf_event. */
46 /* The linear address of the branch source. */
49 /* The linear address of the branch destination. */
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
56 /* The perf_event sample header. */
57 struct perf_event_header header
;
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts
;
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
67 struct btrace_cpu cpu
;
68 unsigned int eax
, ebx
, ecx
, edx
;
71 memset (&cpu
, 0, sizeof (cpu
));
73 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
76 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
77 && edx
== signature_INTEL_edx
)
79 unsigned int cpuid
, ignore
;
81 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
84 cpu
.vendor
= CV_INTEL
;
86 cpu
.family
= (cpuid
>> 8) & 0xf;
87 cpu
.model
= (cpuid
>> 4) & 0xf;
89 if (cpu
.family
== 0x6)
90 cpu
.model
+= (cpuid
>> 12) & 0xf0;
98 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
101 perf_event_new_data (const struct perf_event_buffer
*pev
)
103 return *pev
->data_head
!= pev
->last_head
;
106 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
107 to the memory holding the copy.
108 The caller is responsible for freeing the memory. */
111 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
114 const gdb_byte
*begin
, *end
, *start
, *stop
;
122 /* We should never ask for more data than the buffer can hold. */
123 buffer_size
= pev
->size
;
124 gdb_assert (size
<= buffer_size
);
126 /* If we ask for more data than we seem to have, we wrap around and read
127 data from the end of the buffer. This is already handled by the %
128 BUFFER_SIZE operation, below. Here, we just need to make sure that we
131 Note that this is perfectly OK for perf event buffers where data_head
132 doesn'grow indefinitely and instead wraps around to remain within the
133 buffer's boundaries. */
134 if (data_head
< size
)
135 data_head
+= buffer_size
;
137 gdb_assert (size
<= data_head
);
138 data_tail
= data_head
- size
;
141 start
= begin
+ data_tail
% buffer_size
;
142 stop
= begin
+ data_head
% buffer_size
;
144 buffer
= (gdb_byte
*) xmalloc (size
);
147 memcpy (buffer
, start
, stop
- start
);
150 end
= begin
+ buffer_size
;
152 memcpy (buffer
, start
, end
- start
);
153 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
159 /* Copy the perf event buffer data from PEV.
160 Store a pointer to the copy into DATA and its size in SIZE. */
163 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
169 data_head
= *pev
->data_head
;
172 *data
= perf_event_read (pev
, data_head
, size
);
175 pev
->last_head
= data_head
;
178 /* Try to determine the start address of the Linux kernel. */
181 linux_determine_kernel_start (void)
183 static uint64_t kernel_start
;
191 gdb_file_up file
= gdb_fopen_cloexec ("/proc/kallsyms", "r");
195 while (!feof (file
.get ()))
197 char buffer
[1024], symbol
[8], *line
;
201 line
= fgets (buffer
, sizeof (buffer
), file
.get ());
205 match
= sscanf (line
, "%" SCNx64
" %*[tT] %7s", &addr
, symbol
);
209 if (strcmp (symbol
, "_text") == 0)
219 /* Check whether an address is in the kernel. */
222 perf_event_is_kernel_addr (uint64_t addr
)
224 uint64_t kernel_start
;
226 kernel_start
= linux_determine_kernel_start ();
227 if (kernel_start
!= 0ull)
228 return (addr
>= kernel_start
);
230 /* If we don't know the kernel's start address, let's check the most
231 significant bit. This will work at least for 64-bit kernels. */
232 return ((addr
& (1ull << 63)) != 0);
235 /* Check whether a perf event record should be skipped. */
238 perf_event_skip_bts_record (const struct perf_event_bts
*bts
)
240 /* The hardware may report branches from kernel into user space. Branches
241 from user into kernel space will be suppressed. We filter the former to
242 provide a consistent branch trace excluding kernel. */
243 return perf_event_is_kernel_addr (bts
->from
);
246 /* Perform a few consistency checks on a perf event sample record. This is
247 meant to catch cases when we get out of sync with the perf event stream. */
250 perf_event_sample_ok (const struct perf_event_sample
*sample
)
252 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
255 if (sample
->header
.size
!= sizeof (*sample
))
261 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
262 and to addresses (plus a header).
264 Start points into that buffer at the next sample position.
265 We read the collected samples backwards from start.
267 While reading the samples, we convert the information into a list of blocks.
268 For two adjacent samples s1 and s2, we form a block b such that b.begin =
269 s1.to and b.end = s2.from.
271 In case the buffer overflows during sampling, one sample may have its lower
272 part at the end and its upper part at the beginning of the buffer. */
274 static VEC (btrace_block_s
) *
275 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
276 const uint8_t *end
, const uint8_t *start
, size_t size
)
278 VEC (btrace_block_s
) *btrace
= NULL
;
279 struct perf_event_sample sample
;
281 struct btrace_block block
= { 0, 0 };
282 struct regcache
*regcache
;
284 gdb_assert (begin
<= start
);
285 gdb_assert (start
<= end
);
287 /* The first block ends at the current pc. */
288 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
289 block
.end
= regcache_read_pc (regcache
);
291 /* The buffer may contain a partial record as its last entry (i.e. when the
292 buffer size is not a multiple of the sample size). */
293 read
= sizeof (sample
) - 1;
295 for (; read
< size
; read
+= sizeof (sample
))
297 const struct perf_event_sample
*psample
;
299 /* Find the next perf_event sample in a backwards traversal. */
300 start
-= sizeof (sample
);
302 /* If we're still inside the buffer, we're done. */
304 psample
= (const struct perf_event_sample
*) start
;
309 /* We're to the left of the ring buffer, we will wrap around and
310 reappear at the very right of the ring buffer. */
312 missing
= (begin
- start
);
313 start
= (end
- missing
);
315 /* If the entire sample is missing, we're done. */
316 if (missing
== sizeof (sample
))
317 psample
= (const struct perf_event_sample
*) start
;
322 /* The sample wrapped around. The lower part is at the end and
323 the upper part is at the beginning of the buffer. */
324 stack
= (uint8_t *) &sample
;
326 /* Copy the two parts so we have a contiguous sample. */
327 memcpy (stack
, start
, missing
);
328 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
334 if (!perf_event_sample_ok (psample
))
336 warning (_("Branch trace may be incomplete."));
340 if (perf_event_skip_bts_record (&psample
->bts
))
343 /* We found a valid sample, so we can complete the current block. */
344 block
.begin
= psample
->bts
.to
;
346 VEC_safe_push (btrace_block_s
, btrace
, &block
);
348 /* Start the next block. */
349 block
.end
= psample
->bts
.from
;
352 /* Push the last block (i.e. the first one of inferior execution), as well.
353 We don't know where it ends, but we know where it starts. If we're
354 reading delta trace, we can fill in the start address later on.
355 Otherwise we will prune it. */
357 VEC_safe_push (btrace_block_s
, btrace
, &block
);
362 /* Check whether an Intel cpu supports BTS. */
365 intel_supports_bts (const struct btrace_cpu
*cpu
)
372 case 0x1a: /* Nehalem */
376 case 0x25: /* Westmere */
379 case 0x2a: /* Sandy Bridge */
381 case 0x3a: /* Ivy Bridge */
383 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
384 "from" information afer an EIST transition, T-states, C1E, or
385 Adaptive Thermal Throttling. */
393 /* Check whether the cpu supports BTS. */
396 cpu_supports_bts (void)
398 struct btrace_cpu cpu
;
400 cpu
= btrace_this_cpu ();
404 /* Don't know about others. Let's assume they do. */
408 return intel_supports_bts (&cpu
);
412 /* The perf_event_open syscall failed. Try to print a helpful error
416 diagnose_perf_event_open_fail ()
423 static const char filename
[] = "/proc/sys/kernel/perf_event_paranoid";
424 gdb_file_up file
= gdb_fopen_cloexec (filename
, "r");
425 if (file
.get () == nullptr)
428 int level
, found
= fscanf (file
.get (), "%d", &level
);
429 if (found
== 1 && level
> 2)
430 error (_("You do not have permission to record the process. "
431 "Try setting %s to 2 or less."), filename
);
437 error (_("Failed to start recording: %s"), safe_strerror (errno
));
440 /* Enable branch tracing in BTS format. */
442 static struct btrace_target_info
*
443 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
445 struct btrace_tinfo_bts
*bts
;
450 if (!cpu_supports_bts ())
451 error (_("BTS support has been disabled for the target cpu."));
453 gdb::unique_xmalloc_ptr
<btrace_target_info
> tinfo
454 (XCNEW (btrace_target_info
));
457 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
458 bts
= &tinfo
->variant
.bts
;
460 bts
->attr
.size
= sizeof (bts
->attr
);
461 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
462 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
463 bts
->attr
.sample_period
= 1;
465 /* We sample from and to address. */
466 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
468 bts
->attr
.exclude_kernel
= 1;
469 bts
->attr
.exclude_hv
= 1;
470 bts
->attr
.exclude_idle
= 1;
477 scoped_fd
fd (syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0));
479 diagnose_perf_event_open_fail ();
481 /* Convert the requested size in bytes to pages (rounding up). */
482 pages
= ((size_t) conf
->size
/ PAGE_SIZE
483 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
484 /* We need at least one page. */
488 /* The buffer size can be requested in powers of two pages. Adjust PAGES
489 to the next power of two. */
490 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
491 if ((pages
& ((size_t) 1 << pg
)) != 0)
492 pages
+= ((size_t) 1 << pg
);
494 /* We try to allocate the requested size.
495 If that fails, try to get as much as we can. */
497 for (; pages
> 0; pages
>>= 1)
502 data_size
= (__u64
) pages
* PAGE_SIZE
;
504 /* Don't ask for more than we can represent in the configuration. */
505 if ((__u64
) UINT_MAX
< data_size
)
508 size
= (size_t) data_size
;
509 length
= size
+ PAGE_SIZE
;
511 /* Check for overflows. */
512 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
516 /* The number of pages we request needs to be a power of two. */
517 data
.reset (nullptr, length
, PROT_READ
, MAP_SHARED
, fd
.get (), 0);
518 if (data
.get () != MAP_FAILED
)
523 error (_("Failed to map trace buffer: %s."), safe_strerror (errno
));
525 struct perf_event_mmap_page
*header
= (struct perf_event_mmap_page
*)
527 data_offset
= PAGE_SIZE
;
529 #if defined (PERF_ATTR_SIZE_VER5)
530 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
534 data_offset
= header
->data_offset
;
535 data_size
= header
->data_size
;
537 size
= (unsigned int) data_size
;
539 /* Check for overflows. */
540 if ((__u64
) size
!= data_size
)
541 error (_("Failed to determine trace buffer size."));
543 #endif /* defined (PERF_ATTR_SIZE_VER5) */
545 bts
->bts
.size
= size
;
546 bts
->bts
.data_head
= &header
->data_head
;
547 bts
->bts
.mem
= (const uint8_t *) data
.get () + data_offset
;
548 bts
->bts
.last_head
= 0ull;
549 bts
->header
= header
;
550 bts
->file
= fd
.release ();
554 tinfo
->conf
.bts
.size
= (unsigned int) size
;
555 return tinfo
.release ();
558 #if defined (PERF_ATTR_SIZE_VER5)
560 /* Determine the event type. */
563 perf_event_pt_event_type ()
565 static const char filename
[] = "/sys/bus/event_source/devices/intel_pt/type";
568 gdb_file_up file
= gdb_fopen_cloexec (filename
, "r");
569 if (file
.get () == nullptr)
570 error (_("Failed to open %s: %s."), filename
, safe_strerror (errno
));
572 int type
, found
= fscanf (file
.get (), "%d", &type
);
574 error (_("Failed to read the PT event type from %s."), filename
);
579 /* Enable branch tracing in Intel Processor Trace format. */
581 static struct btrace_target_info
*
582 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
584 struct btrace_tinfo_pt
*pt
;
592 gdb::unique_xmalloc_ptr
<btrace_target_info
> tinfo
593 (XCNEW (btrace_target_info
));
596 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
597 pt
= &tinfo
->variant
.pt
;
599 pt
->attr
.size
= sizeof (pt
->attr
);
600 pt
->attr
.type
= perf_event_pt_event_type ();
602 pt
->attr
.exclude_kernel
= 1;
603 pt
->attr
.exclude_hv
= 1;
604 pt
->attr
.exclude_idle
= 1;
607 scoped_fd
fd (syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0));
609 diagnose_perf_event_open_fail ();
611 /* Allocate the configuration page. */
612 scoped_mmap
data (nullptr, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
614 if (data
.get () == MAP_FAILED
)
615 error (_("Failed to map trace user page: %s."), safe_strerror (errno
));
617 struct perf_event_mmap_page
*header
= (struct perf_event_mmap_page
*)
620 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
622 /* Convert the requested size in bytes to pages (rounding up). */
623 pages
= ((size_t) conf
->size
/ PAGE_SIZE
624 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
625 /* We need at least one page. */
629 /* The buffer size can be requested in powers of two pages. Adjust PAGES
630 to the next power of two. */
631 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
632 if ((pages
& ((size_t) 1 << pg
)) != 0)
633 pages
+= ((size_t) 1 << pg
);
635 /* We try to allocate the requested size.
636 If that fails, try to get as much as we can. */
638 for (; pages
> 0; pages
>>= 1)
643 data_size
= (__u64
) pages
* PAGE_SIZE
;
645 /* Don't ask for more than we can represent in the configuration. */
646 if ((__u64
) UINT_MAX
< data_size
)
649 length
= (size_t) data_size
;
651 /* Check for overflows. */
652 if ((__u64
) length
!= data_size
)
655 header
->aux_size
= data_size
;
658 aux
.reset (nullptr, length
, PROT_READ
, MAP_SHARED
, fd
.get (),
660 if (aux
.get () != MAP_FAILED
)
665 error (_("Failed to map trace buffer: %s."), safe_strerror (errno
));
667 pt
->pt
.size
= aux
.size ();
668 pt
->pt
.mem
= (const uint8_t *) aux
.release ();
669 pt
->pt
.data_head
= &header
->aux_head
;
671 pt
->file
= fd
.release ();
675 tinfo
->conf
.pt
.size
= (unsigned int) pt
->pt
.size
;
676 return tinfo
.release ();
679 #else /* !defined (PERF_ATTR_SIZE_VER5) */
681 static struct btrace_target_info
*
682 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
684 error (_("Intel Processor Trace support was disabled at compile time."));
687 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
689 /* See linux-btrace.h. */
691 struct btrace_target_info
*
692 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
694 switch (conf
->format
)
696 case BTRACE_FORMAT_NONE
:
697 error (_("Bad branch trace format."));
700 error (_("Unknown branch trace format."));
702 case BTRACE_FORMAT_BTS
:
703 return linux_enable_bts (ptid
, &conf
->bts
);
705 case BTRACE_FORMAT_PT
:
706 return linux_enable_pt (ptid
, &conf
->pt
);
710 /* Disable BTS tracing. */
712 static enum btrace_error
713 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
715 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
718 return BTRACE_ERR_NONE
;
721 /* Disable Intel Processor Trace tracing. */
723 static enum btrace_error
724 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
726 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
727 munmap((void *) tinfo
->header
, PAGE_SIZE
);
730 return BTRACE_ERR_NONE
;
733 /* See linux-btrace.h. */
736 linux_disable_btrace (struct btrace_target_info
*tinfo
)
738 enum btrace_error errcode
;
740 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
741 switch (tinfo
->conf
.format
)
743 case BTRACE_FORMAT_NONE
:
746 case BTRACE_FORMAT_BTS
:
747 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
750 case BTRACE_FORMAT_PT
:
751 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
755 if (errcode
== BTRACE_ERR_NONE
)
761 /* Read branch trace data in BTS format for the thread given by TINFO into
762 BTRACE using the TYPE reading method. */
764 static enum btrace_error
765 linux_read_bts (struct btrace_data_bts
*btrace
,
766 struct btrace_target_info
*tinfo
,
767 enum btrace_read_type type
)
769 struct perf_event_buffer
*pevent
;
770 const uint8_t *begin
, *end
, *start
;
771 size_t buffer_size
, size
;
772 __u64 data_head
, data_tail
;
773 unsigned int retries
= 5;
775 pevent
= &tinfo
->variant
.bts
.bts
;
777 /* For delta reads, we return at least the partial last block containing
779 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
780 return BTRACE_ERR_NONE
;
782 buffer_size
= pevent
->size
;
783 data_tail
= pevent
->last_head
;
785 /* We may need to retry reading the trace. See below. */
788 data_head
= *pevent
->data_head
;
790 /* Delete any leftover trace from the previous iteration. */
791 VEC_free (btrace_block_s
, btrace
->blocks
);
793 if (type
== BTRACE_READ_DELTA
)
797 /* Determine the number of bytes to read and check for buffer
800 /* Check for data head overflows. We might be able to recover from
801 those but they are very unlikely and it's not really worth the
803 if (data_head
< data_tail
)
804 return BTRACE_ERR_OVERFLOW
;
806 /* If the buffer is smaller than the trace delta, we overflowed. */
807 data_size
= data_head
- data_tail
;
808 if (buffer_size
< data_size
)
809 return BTRACE_ERR_OVERFLOW
;
811 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
812 size
= (size_t) data_size
;
816 /* Read the entire buffer. */
819 /* Adjust the size if the buffer has not overflowed, yet. */
820 if (data_head
< size
)
821 size
= (size_t) data_head
;
824 /* Data_head keeps growing; the buffer itself is circular. */
826 start
= begin
+ data_head
% buffer_size
;
828 if (data_head
<= buffer_size
)
831 end
= begin
+ pevent
->size
;
833 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
835 /* The stopping thread notifies its ptracer before it is scheduled out.
836 On multi-core systems, the debugger might therefore run while the
837 kernel might be writing the last branch trace records.
839 Let's check whether the data head moved while we read the trace. */
840 if (data_head
== *pevent
->data_head
)
844 pevent
->last_head
= data_head
;
846 /* Prune the incomplete last block (i.e. the first one of inferior execution)
847 if we're not doing a delta read. There is no way of filling in its zeroed
849 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
850 && type
!= BTRACE_READ_DELTA
)
851 VEC_pop (btrace_block_s
, btrace
->blocks
);
853 return BTRACE_ERR_NONE
;
856 /* Fill in the Intel Processor Trace configuration information. */
859 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
861 conf
->cpu
= btrace_this_cpu ();
864 /* Read branch trace data in Intel Processor Trace format for the thread
865 given by TINFO into BTRACE using the TYPE reading method. */
867 static enum btrace_error
868 linux_read_pt (struct btrace_data_pt
*btrace
,
869 struct btrace_target_info
*tinfo
,
870 enum btrace_read_type type
)
872 struct perf_event_buffer
*pt
;
874 pt
= &tinfo
->variant
.pt
.pt
;
876 linux_fill_btrace_pt_config (&btrace
->config
);
880 case BTRACE_READ_DELTA
:
881 /* We don't support delta reads. The data head (i.e. aux_head) wraps
882 around to stay inside the aux buffer. */
883 return BTRACE_ERR_NOT_SUPPORTED
;
885 case BTRACE_READ_NEW
:
886 if (!perf_event_new_data (pt
))
887 return BTRACE_ERR_NONE
;
890 case BTRACE_READ_ALL
:
891 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
892 return BTRACE_ERR_NONE
;
895 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
898 /* See linux-btrace.h. */
901 linux_read_btrace (struct btrace_data
*btrace
,
902 struct btrace_target_info
*tinfo
,
903 enum btrace_read_type type
)
905 switch (tinfo
->conf
.format
)
907 case BTRACE_FORMAT_NONE
:
908 return BTRACE_ERR_NOT_SUPPORTED
;
910 case BTRACE_FORMAT_BTS
:
911 /* We read btrace in BTS format. */
912 btrace
->format
= BTRACE_FORMAT_BTS
;
913 btrace
->variant
.bts
.blocks
= NULL
;
915 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
917 case BTRACE_FORMAT_PT
:
918 /* We read btrace in Intel Processor Trace format. */
919 btrace
->format
= BTRACE_FORMAT_PT
;
920 btrace
->variant
.pt
.data
= NULL
;
921 btrace
->variant
.pt
.size
= 0;
923 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
926 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
929 /* See linux-btrace.h. */
931 const struct btrace_config
*
932 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
937 #else /* !HAVE_LINUX_PERF_EVENT_H */
939 /* See linux-btrace.h. */
941 struct btrace_target_info
*
942 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
947 /* See linux-btrace.h. */
950 linux_disable_btrace (struct btrace_target_info
*tinfo
)
952 return BTRACE_ERR_NOT_SUPPORTED
;
955 /* See linux-btrace.h. */
958 linux_read_btrace (struct btrace_data
*btrace
,
959 struct btrace_target_info
*tinfo
,
960 enum btrace_read_type type
)
962 return BTRACE_ERR_NOT_SUPPORTED
;
965 /* See linux-btrace.h. */
967 const struct btrace_config
*
968 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
973 #endif /* !HAVE_LINUX_PERF_EVENT_H */