/* Linux-dependent part of branch trace support for GDB, and GDBserver.
- Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Copyright (C) 2013-2015 Free Software Foundation, Inc.
Contributed by Intel Corp. <markus.t.metzger@intel.com>
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
-#ifdef GDBSERVER
-#include "server.h"
-#else
-#include "defs.h"
-#endif
-
+#include "common-defs.h"
#include "linux-btrace.h"
-#include "regcache.h"
-#include "gdbthread.h"
+#include "common-regcache.h"
#include "gdb_wait.h"
-#include "i386-cpuid.h"
+#include "x86-cpuid.h"
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
-#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
struct perf_event_bts bts;
};
-/* Get the perf_event header. */
-
-static inline volatile struct perf_event_mmap_page *
-perf_event_header (struct btrace_target_info* tinfo)
+/* Identify the cpu we're running on. */
+static struct btrace_cpu
+btrace_this_cpu (void)
{
- return tinfo->buffer;
-}
+ struct btrace_cpu cpu;
+ unsigned int eax, ebx, ecx, edx;
+ int ok;
-/* Get the size of the perf_event mmap buffer. */
+ memset (&cpu, 0, sizeof (cpu));
-static inline size_t
-perf_event_mmap_size (const struct btrace_target_info *tinfo)
-{
- /* The branch trace buffer is preceded by a configuration page. */
- return (tinfo->size + 1) * PAGE_SIZE;
-}
+ ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
+ if (ok != 0)
+ {
+ if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
+ && edx == signature_INTEL_edx)
+ {
+ unsigned int cpuid, ignore;
-/* Get the size of the perf_event buffer. */
+ ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
+ if (ok != 0)
+ {
+ cpu.vendor = CV_INTEL;
-static inline size_t
-perf_event_buffer_size (struct btrace_target_info* tinfo)
-{
- return tinfo->size * PAGE_SIZE;
-}
+ cpu.family = (cpuid >> 8) & 0xf;
+ cpu.model = (cpuid >> 4) & 0xf;
-/* Get the start address of the perf_event buffer. */
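+ /* For family 0x6, the extended model bits (CPUID leaf 1, EAX[19:16])
+ form the high nibble of the model number. */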
+ if (cpu.family == 0x6)
+ cpu.model += (cpuid >> 12) & 0xf0;
+ }
+ }
+ }
-static inline const uint8_t *
-perf_event_buffer_begin (struct btrace_target_info* tinfo)
-{
- return ((const uint8_t *) tinfo->buffer) + PAGE_SIZE;
+ return cpu;
}
-/* Get the end address of the perf_event buffer. */
+/* Return non-zero if there is new data in PEVENT; zero otherwise. */
-static inline const uint8_t *
-perf_event_buffer_end (struct btrace_target_info* tinfo)
+static int
+perf_event_new_data (const struct perf_event_buffer *pev)
{
- return perf_event_buffer_begin (tinfo) + perf_event_buffer_size (tinfo);
+ return *pev->data_head != pev->last_head;
}
/* Check whether an address is in the kernel. */
/* Check whether a perf event record should be skipped. */
static inline int
-perf_event_skip_record (const struct btrace_target_info *tinfo,
- const struct perf_event_bts *bts)
+perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
+ const struct perf_event_bts *bts)
{
/* The hardware may report branches from kernel into user space. Branches
from user into kernel space will be suppressed. We filter the former to
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
- const uint8_t *end, const uint8_t *start, size_t size)
+ const uint8_t *end, const uint8_t *start,
+ unsigned long long size)
{
VEC (btrace_block_s) *btrace = NULL;
struct perf_event_sample sample;
- size_t read = 0;
+ unsigned long long read = 0;
struct btrace_block block = { 0, 0 };
struct regcache *regcache;
gdb_assert (start <= end);
/* The first block ends at the current pc. */
-#ifdef GDBSERVER
- regcache = get_thread_regcache (find_thread_ptid (tinfo->ptid), 1);
-#else
- regcache = get_thread_regcache (tinfo->ptid);
-#endif
+ regcache = get_thread_regcache_for_ptid (tinfo->ptid);
block.end = regcache_read_pc (regcache);
/* The buffer may contain a partial record as its last entry (i.e. when the
break;
}
- if (perf_event_skip_record (tinfo, &psample->bts))
+ if (perf_event_skip_bts_record (tinfo, &psample->bts))
continue;
/* We found a valid sample, so we can complete the current block. */
return btrace;
}
-/* Check whether the kernel supports branch tracing. */
+/* Check whether the kernel supports BTS. */
static int
-kernel_supports_btrace (void)
+kernel_supports_bts (void)
{
struct perf_event_attr attr;
pid_t child, pid;
switch (child)
{
case -1:
- warning (_("test branch tracing: cannot fork: %s."), strerror (errno));
+ warning (_("test bts: cannot fork: %s."), strerror (errno));
return 0;
case 0:
status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
if (status != 0)
{
- warning (_("test branch tracing: cannot PTRACE_TRACEME: %s."),
+ warning (_("test bts: cannot PTRACE_TRACEME: %s."),
strerror (errno));
_exit (1);
}
status = raise (SIGTRAP);
if (status != 0)
{
- warning (_("test branch tracing: cannot raise SIGTRAP: %s."),
+ warning (_("test bts: cannot raise SIGTRAP: %s."),
strerror (errno));
_exit (1);
}
pid = waitpid (child, &status, 0);
if (pid != child)
{
- warning (_("test branch tracing: bad pid %ld, error: %s."),
+ warning (_("test bts: bad pid %ld, error: %s."),
(long) pid, strerror (errno));
return 0;
}
if (!WIFSTOPPED (status))
{
- warning (_("test branch tracing: expected stop. status: %d."),
+ warning (_("test bts: expected stop. status: %d."),
status);
return 0;
}
pid = waitpid (child, &status, 0);
if (pid != child)
{
- warning (_("test branch tracing: bad pid %ld, error: %s."),
+ warning (_("test bts: bad pid %ld, error: %s."),
(long) pid, strerror (errno));
if (!WIFSIGNALED (status))
- warning (_("test branch tracing: expected killed. status: %d."),
+ warning (_("test bts: expected killed. status: %d."),
status);
}
}
}
-/* Check whether an Intel cpu supports branch tracing. */
+/* Check whether an Intel cpu supports BTS. */
static int
-intel_supports_btrace (void)
+intel_supports_bts (const struct btrace_cpu *cpu)
{
- unsigned int cpuid, model, family;
-
- if (!i386_cpuid (1, &cpuid, NULL, NULL, NULL))
- return 0;
-
- family = (cpuid >> 8) & 0xf;
- model = (cpuid >> 4) & 0xf;
-
- switch (family)
+ switch (cpu->family)
{
case 0x6:
- model += (cpuid >> 12) & 0xf0;
-
- switch (model)
+ switch (cpu->model)
{
case 0x1a: /* Nehalem */
case 0x1f:
return 1;
}
-/* Check whether the cpu supports branch tracing. */
+/* Check whether the cpu supports BTS. */
static int
-cpu_supports_btrace (void)
+cpu_supports_bts (void)
{
- unsigned int ebx, ecx, edx;
-
- if (!i386_cpuid (0, NULL, &ebx, &ecx, &edx))
- return 0;
+ struct btrace_cpu cpu;
- if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
- && edx == signature_INTEL_edx)
- return intel_supports_btrace ();
+ cpu = btrace_this_cpu ();
+ switch (cpu.vendor)
+ {
+ default:
+ /* Don't know about others. Let's assume they do. */
+ return 1;
- /* Don't know about others. Let's assume they do. */
- return 1;
+ case CV_INTEL:
+ return intel_supports_bts (&cpu);
+ }
}
-/* See linux-btrace.h. */
+/* Check whether the linux target supports BTS. */
-int
-linux_supports_btrace (struct target_ops *ops)
+static int
+linux_supports_bts (void)
{
static int cached;
if (cached == 0)
{
- if (!kernel_supports_btrace ())
+ if (!kernel_supports_bts ())
cached = -1;
- else if (!cpu_supports_btrace ())
+ else if (!cpu_supports_bts ())
cached = -1;
else
cached = 1;
/* See linux-btrace.h. */
-struct btrace_target_info *
-linux_enable_btrace (ptid_t ptid)
+int
+linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
+{
+ switch (format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return 0;
+
+ case BTRACE_FORMAT_BTS:
+ return linux_supports_bts ();
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
+}
+
+/* Enable branch tracing in BTS format. */
+
+static struct btrace_target_info *
+linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
+ struct perf_event_mmap_page *header;
struct btrace_target_info *tinfo;
+ struct btrace_tinfo_bts *bts;
+ unsigned long long size, pages;
int pid, pg;
tinfo = xzalloc (sizeof (*tinfo));
tinfo->ptid = ptid;
+ tinfo->ptr_bits = 0;
- tinfo->attr.size = sizeof (tinfo->attr);
- tinfo->attr.type = PERF_TYPE_HARDWARE;
- tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
- tinfo->attr.sample_period = 1;
+ tinfo->conf.format = BTRACE_FORMAT_BTS;
+ bts = &tinfo->variant.bts;
- /* We sample from and to address. */
- tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
+ bts->attr.size = sizeof (bts->attr);
+ bts->attr.type = PERF_TYPE_HARDWARE;
+ bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+ bts->attr.sample_period = 1;
- tinfo->attr.exclude_kernel = 1;
- tinfo->attr.exclude_hv = 1;
- tinfo->attr.exclude_idle = 1;
+ /* We sample the from and to addresses of each branch. */
+ bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
- tinfo->ptr_bits = 0;
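+ /* Trace user-space branches only; kernel, hypervisor, and idle
+ contexts are excluded. */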
+ bts->attr.exclude_kernel = 1;
+ bts->attr.exclude_hv = 1;
+ bts->attr.exclude_idle = 1;
pid = ptid_get_lwp (ptid);
if (pid == 0)
pid = ptid_get_pid (ptid);
errno = 0;
- tinfo->file = syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0);
- if (tinfo->file < 0)
+ bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
+ if (bts->file < 0)
goto err;
- /* We try to allocate as much buffer as we can get.
- We could allow the user to specify the size of the buffer, but then
- we'd leave this search for the maximum buffer size to him. */
- for (pg = 4; pg >= 0; --pg)
+ /* Convert the requested size in bytes to pages (rounding up). */
+ pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* We need at least one page. */
+ if (pages == 0)
+ pages = 1;
+
+ /* The buffer size can be requested in powers of two pages. Adjust PAGES
+ to the next power of two. */
+ for (pg = 0; pages != (1u << pg); ++pg)
+ if ((pages & (1u << pg)) != 0)
+ pages += (1u << pg);
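+ /* E.g., with 4 KiB pages, a 24 KiB request yields 6 pages (110b) and
+ is rounded up to 8 (1000b): bit 1 adds 2, and the loop exits once
+ PAGES equals 1 << 3. */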
+
+ /* We try to allocate the requested size. If that fails, halve PAGES
+ (keeping it a power of two) and try to get as much as we can. */
+ for (; pages > 0; pages >>= 1)
{
- /* The number of pages we request needs to be a power of two. */
- tinfo->size = 1 << pg;
- tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
- PROT_READ, MAP_SHARED, tinfo->file, 0);
- if (tinfo->buffer == MAP_FAILED)
+ size_t length;
+
+ size = pages * PAGE_SIZE;
+ length = size + PAGE_SIZE;
+
+ /* Check for overflows: SIZE + PAGE_SIZE may not fit into size_t LENGTH. */
+ if ((unsigned long long) length < size)
continue;
- return tinfo;
+ /* Map the trace buffer plus the preceding configuration page. */
+ header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
+ if (header != MAP_FAILED)
+ break;
}
+ if (header == MAP_FAILED)
+ goto err_file;
+
+ bts->header = header;
+ bts->bts.mem = ((const uint8_t *) header) + PAGE_SIZE;
+ bts->bts.size = size;
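+ /* DATA_HEAD follows the kernel's write position; LAST_HEAD remembers
+ how far the previous read got. */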
+ bts->bts.data_head = &header->data_head;
+ bts->bts.last_head = 0;
+
+ tinfo->conf.bts.size = size;
+ return tinfo;
+
+ err_file:
/* We were not able to allocate any buffer. */
- close (tinfo->file);
+ close (bts->file);
err:
xfree (tinfo);
/* See linux-btrace.h. */
-enum btrace_error
-linux_disable_btrace (struct btrace_target_info *tinfo)
+struct btrace_target_info *
+linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
- int errcode;
+ struct btrace_target_info *tinfo;
- errno = 0;
- errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
- if (errcode != 0)
- return BTRACE_ERR_UNKNOWN;
+ tinfo = NULL;
+ switch (conf->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ break;
- close (tinfo->file);
- xfree (tinfo);
+ case BTRACE_FORMAT_BTS:
+ tinfo = linux_enable_bts (ptid, &conf->bts);
+ break;
+ }
- return BTRACE_ERR_NONE;
+ return tinfo;
}
-/* Check whether the branch trace has changed. */
+/* Disable BTS tracing. */
-static int
-linux_btrace_has_changed (struct btrace_target_info *tinfo)
+static enum btrace_error
+linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
- volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);
+ munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
+ close (tinfo->file);
- return header->data_head != tinfo->data_head;
+ return BTRACE_ERR_NONE;
}
/* See linux-btrace.h. */
enum btrace_error
-linux_read_btrace (VEC (btrace_block_s) **btrace,
- struct btrace_target_info *tinfo,
- enum btrace_read_type type)
+linux_disable_btrace (struct btrace_target_info *tinfo)
{
- volatile struct perf_event_mmap_page *header;
+ enum btrace_error errcode;
+
+ errcode = BTRACE_ERR_NOT_SUPPORTED;
+ switch (tinfo->conf.format)
+ {
+ case BTRACE_FORMAT_NONE:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ errcode = linux_disable_bts (&tinfo->variant.bts);
+ break;
+ }
+
+ if (errcode == BTRACE_ERR_NONE)
+ xfree (tinfo);
+
+ return errcode;
+}
+
+/* Read branch trace data in BTS format for the thread given by TINFO into
+ BTRACE using the TYPE reading method. */
+
+static enum btrace_error
+linux_read_bts (struct btrace_data_bts *btrace,
+ struct btrace_target_info *tinfo,
+ enum btrace_read_type type)
+{
+ struct perf_event_buffer *pevent;
const uint8_t *begin, *end, *start;
- unsigned long data_head, data_tail, retries = 5;
- size_t buffer_size, size;
+ unsigned long long data_head, data_tail, buffer_size, size;
+ unsigned int retries = 5;
+
+ pevent = &tinfo->variant.bts.bts;
/* For delta reads, we return at least the partial last block containing
the current PC. */
- if (type == BTRACE_READ_NEW && !linux_btrace_has_changed (tinfo))
+ if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
return BTRACE_ERR_NONE;
- header = perf_event_header (tinfo);
- buffer_size = perf_event_buffer_size (tinfo);
- data_tail = tinfo->data_head;
+ buffer_size = pevent->size;
+ data_tail = pevent->last_head;
/* We may need to retry reading the trace. See below. */
while (retries--)
{
- data_head = header->data_head;
+ data_head = *pevent->data_head;
/* Delete any leftover trace from the previous iteration. */
- VEC_free (btrace_block_s, *btrace);
+ VEC_free (btrace_block_s, btrace->blocks);
if (type == BTRACE_READ_DELTA)
{
}
/* Data_head keeps growing; the buffer itself is circular. */
- begin = perf_event_buffer_begin (tinfo);
+ begin = pevent->mem;
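+ /* START points one past the newest sample; the trace is read
+ backwards from there. */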
start = begin + data_head % buffer_size;
if (data_head <= buffer_size)
end = start;
else
- end = perf_event_buffer_end (tinfo);
+ end = begin + pevent->size;
- *btrace = perf_event_read_bts (tinfo, begin, end, start, size);
+ btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
/* The stopping thread notifies its ptracer before it is scheduled out.
On multi-core systems, the debugger might therefore run while the
kernel might be writing the last branch trace records.
Let's check whether the data head moved while we read the trace. */
- if (data_head == header->data_head)
+ if (data_head == *pevent->data_head)
break;
}
- tinfo->data_head = data_head;
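+ /* Remember the new head; subsequent delta reads and new-data checks
+ start from here. */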
+ pevent->last_head = data_head;
/* Prune the incomplete last block (i.e. the first one of inferior execution)
if we're not doing a delta read. There is no way of filling in its zeroed
BEGIN element. */
- if (!VEC_empty (btrace_block_s, *btrace) && type != BTRACE_READ_DELTA)
- VEC_pop (btrace_block_s, *btrace);
+ if (!VEC_empty (btrace_block_s, btrace->blocks)
+ && type != BTRACE_READ_DELTA)
+ VEC_pop (btrace_block_s, btrace->blocks);
return BTRACE_ERR_NONE;
}
+/* See linux-btrace.h. */
+
+enum btrace_error
+linux_read_btrace (struct btrace_data *btrace,
+ struct btrace_target_info *tinfo,
+ enum btrace_read_type type)
+{
+ switch (tinfo->conf.format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return BTRACE_ERR_NOT_SUPPORTED;
+
+ case BTRACE_FORMAT_BTS:
+ /* We read btrace in BTS format. */
+ btrace->format = BTRACE_FORMAT_BTS;
+ btrace->variant.bts.blocks = NULL;
+
+ return linux_read_bts (&btrace->variant.bts, tinfo, type);
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
+/* See linux-btrace.h. */
+
+const struct btrace_config *
+linux_btrace_conf (const struct btrace_target_info *tinfo)
+{
+ return &tinfo->conf;
+}
+
#else /* !HAVE_LINUX_PERF_EVENT_H */
/* See linux-btrace.h. */
int
-linux_supports_btrace (struct target_ops *ops)
+linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
return 0;
}
/* See linux-btrace.h. */
struct btrace_target_info *
-linux_enable_btrace (ptid_t ptid)
+linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
return NULL;
}
/* See linux-btrace.h. */
enum btrace_error
-linux_read_btrace (VEC (btrace_block_s) **btrace,
+linux_read_btrace (struct btrace_data *btrace,
struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
return BTRACE_ERR_NOT_SUPPORTED;
}
+/* See linux-btrace.h. */
+
+const struct btrace_config *
+linux_btrace_conf (const struct btrace_target_info *tinfo)
+{
+ return NULL;
+}
+
#endif /* !HAVE_LINUX_PERF_EVENT_H */