/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>     /* syscall.  */
#include <sys/mman.h>   /* munmap, PROT_*, MAP_*.  */
#include <sys/user.h>   /* PAGE_SIZE.  */
#include "nat/gdb_ptrace.h"
#include <sys/types.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              /* The extended model bits only apply to family 0x6.  */
              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
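
/* Illustrative example (not part of the original code, hypothetical values):
   with a BUFFER_SIZE of 8 and a DATA_HEAD of 10, asking for SIZE == 6 bytes
   gives DATA_TAIL == 4, so START == BEGIN + 4 and STOP == BEGIN + 2.  Since
   START lies behind STOP, the copy wraps: 4 bytes are taken from the tail of
   the buffer and the remaining 2 bytes from its beginning.  */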

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      /* The kernel's text section starts at the "_text" symbol.  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
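
/* For illustration (hypothetical values, not from the original code): a
   typical x86-64 kernel text address such as 0xffffffff81000000 has bit 63
   set and is classified as a kernel address by the fallback check, while a
   user-space address such as 0x00007f0000000000 is not.  */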

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel code.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}
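
/* Worked example (hypothetical addresses, for illustration only): assume two
   BTS samples were recorded in order, first (from = A1, to = A2) and then
   (from = B1, to = B2), and the thread stopped at PC.  Reading backwards,
   the sample (B1, B2) completes the block [B2; PC), the sample (A1, A2)
   completes the block [A2; B1), and a final block ending at A1 with an
   unknown begin address is pushed last.  */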

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x25: /* Westmere */
        case 0x2a: /* Sandy Bridge */
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }
      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
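
/* For example (illustrative, not from the original code), a system
   administrator could permit recording for unprivileged users with
     # echo 2 > /proc/sys/kernel/perf_event_paranoid
   which matches the advice given in the error message above.  */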

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));

  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
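
  /* Worked example (illustrative only): a request of five pages (binary 101)
     becomes six (110) after bit 0 is carried, then eight (1000) after bit 1
     is carried, at which point PAGES equals 1 << 3 and the loop stops.  */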

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));

  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */