/* Linux-dependent part of branch trace support for GDB, and GDBserver.
- Copyright (C) 2013-2015 Free Software Foundation, Inc.
+ Copyright (C) 2013-2017 Free Software Foundation, Inc.
Contributed by Intel Corp. <markus.t.metzger@intel.com>
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
+#include "filestuff.h"
+
+#include <inttypes.h>
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>
-#include <sys/utsname.h>
/* A branch trace record in perf_event. */
struct perf_event_bts
return *pev->data_head != pev->last_head;
}
-/* Try to determine the size of a pointer in bits for the OS.
-
- This is the same as the size of a pointer for the inferior process
- except when a 32-bit inferior is running on a 64-bit OS. */
-
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
to the memory holding the copy.
The caller is responsible for freeing the memory. */
if (size == 0)
return NULL;
+ /* We should never ask for more data than the buffer can hold. */
+ buffer_size = pev->size;
+ gdb_assert (size <= buffer_size);
+
+ /* If we ask for more data than we seem to have, we wrap around and read
+ data from the end of the buffer. This is already handled by the %
+ BUFFER_SIZE operation, below. Here, we just need to make sure that we
+ don't underflow.
+
+ Note that this is perfectly OK for perf event buffers where data_head
+ doesn't grow indefinitely and instead wraps around to remain within the
+ buffer's boundaries. */
+ if (data_head < size)
+ data_head += buffer_size;
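+
+ /* For example (numbers purely illustrative): with a 4096-byte buffer,
+ DATA_HEAD == 8 and SIZE == 16, DATA_HEAD becomes 4104 and DATA_TAIL 4088,
+ so the modulo arithmetic below copies the last 8 bytes of the buffer
+ followed by the 8 bytes at its beginning. */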
+
gdb_assert (size <= data_head);
data_tail = data_head - size;
- buffer_size = pev->size;
begin = pev->mem;
start = begin + data_tail % buffer_size;
stop = begin + data_head % buffer_size;
- buffer = xmalloc (size);
+ buffer = (gdb_byte *) xmalloc (size);
if (start < stop)
memcpy (buffer, start, stop - start);
__u64 data_head;
data_head = *pev->data_head;
-
size = pev->size;
- if (data_head < size)
- size = (size_t) data_head;
*data = perf_event_read (pev, data_head, size);
*psize = size;
return -1;
}
-static int
-linux_determine_kernel_ptr_bits (void)
+/* Try to determine the start address of the Linux kernel. */
+
+static uint64_t
+linux_determine_kernel_start (void)
{
- struct utsname utsn;
- int errcode;
+ static uint64_t kernel_start;
+ static int cached;
+ FILE *file;
- memset (&utsn, 0, sizeof (utsn));
+ if (cached != 0)
+ return kernel_start;
- errcode = uname (&utsn);
- if (errcode < 0)
- return 0;
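+ /* Mark the lookup as done up front so that a failed attempt to read
+ /proc/kallsyms below is not retried on every call. */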
+ cached = 1;
- /* We only need to handle the 64-bit host case, here. For 32-bit host,
- the pointer size can be filled in later based on the inferior. */
- if (strcmp (utsn.machine, "x86_64") == 0)
- return 64;
+ file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
+ if (file == NULL)
+ return kernel_start;
- return 0;
+ while (!feof (file))
+ {
+ char buffer[1024], symbol[8], *line;
+ uint64_t addr;
+ int match;
+
+ line = fgets (buffer, sizeof (buffer), file);
+ if (line == NULL)
+ break;
+
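+ /* A line in /proc/kallsyms looks like (address purely illustrative):
+
+ ffffffff81000000 T _text
+
+ The "%*[tT]" conversion below accepts both local (t) and global (T)
+ text symbols and discards the matched field. */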
+ match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
+ if (match != 2)
+ continue;
+
+ if (strcmp (symbol, "_text") == 0)
+ {
+ kernel_start = addr;
+ break;
+ }
+ }
+
+ fclose (file);
+
+ return kernel_start;
}
/* Check whether an address is in the kernel. */
static inline int
-perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
- uint64_t addr)
+perf_event_is_kernel_addr (uint64_t addr)
{
- uint64_t mask;
-
- /* If we don't know the size of a pointer, we can't check. Let's assume it's
- not a kernel address in this case. */
- if (tinfo->ptr_bits == 0)
- return 0;
+ uint64_t kernel_start;
- /* A bit mask for the most significant bit in an address. */
- mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
+ kernel_start = linux_determine_kernel_start ();
+ if (kernel_start != 0ull)
+ return (addr >= kernel_start);
- /* Check whether the most significant bit in the address is set. */
- return (addr & mask) != 0;
+ /* If we don't know the kernel's start address, let's check the most
+ significant bit. This will work at least for 64-bit kernels. */
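+ /* On x86-64, for instance, the kernel is mapped into the upper canonical
+ half of the address space, where bit 63 is set. */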
+ return ((addr & (1ull << 63)) != 0);
}
/* Check whether a perf event record should be skipped. */
static inline int
-perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
- const struct perf_event_bts *bts)
+perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
/* The hardware may report branches from kernel into user space. Branches
from user into kernel space will be suppressed. We filter the former to
provide a consistent branch trace excluding kernel. */
- return perf_event_is_kernel_addr (tinfo, bts->from);
+ return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record. This is
break;
}
- if (perf_event_skip_bts_record (tinfo, &psample->bts))
+ if (perf_event_skip_bts_record (&psample->bts))
continue;
/* We found a valid sample, so we can complete the current block. */
}
}
-/* Check whether the kernel supports Intel(R) Processor Trace. */
+/* Check whether the kernel supports Intel Processor Trace. */
static int
kernel_supports_pt (void)
return cached > 0;
}
-/* Check whether the linux target supports Intel(R) Processor Trace. */
+/* Check whether the linux target supports Intel Processor Trace. */
static int
linux_supports_pt (void)
__u64 data_offset;
int pid, pg;
- tinfo = xzalloc (sizeof (*tinfo));
+ tinfo = XCNEW (struct btrace_target_info);
tinfo->ptid = ptid;
- tinfo->ptr_bits = linux_determine_kernel_ptr_bits ();
tinfo->conf.format = BTRACE_FORMAT_BTS;
bts = &tinfo->variant.bts;
continue;
/* The number of pages we request needs to be a power of two. */
- header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
+ header = ((struct perf_event_mmap_page *)
+ mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
if (header != MAP_FAILED)
break;
}
#if defined (PERF_ATTR_SIZE_VER5)
-/* Enable branch tracing in Intel(R) Processor Trace format. */
+/* Enable branch tracing in Intel Processor Trace format. */
static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
if (pid == 0)
pid = ptid_get_pid (ptid);
- tinfo = xzalloc (sizeof (*tinfo));
+ tinfo = XCNEW (struct btrace_target_info);
tinfo->ptid = ptid;
- tinfo->ptr_bits = 0;
tinfo->conf.format = BTRACE_FORMAT_PT;
pt = &tinfo->variant.pt;
goto err;
/* Allocate the configuration page. */
- header = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
- pt->file, 0);
+ header = ((struct perf_event_mmap_page *)
+ mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pt->file, 0));
if (header == MAP_FAILED)
goto err_file;
header->aux_size = data_size;
length = size;
- pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
- header->aux_offset);
+ pt->pt.mem = ((const uint8_t *)
+ mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
+ header->aux_offset));
if (pt->pt.mem != MAP_FAILED)
break;
}
return BTRACE_ERR_NONE;
}
-/* Disable Intel(R) Processor Trace tracing. */
+/* Disable Intel Processor Trace tracing. */
static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
return BTRACE_ERR_NONE;
}
-/* Fill in the Intel(R) Processor Trace configuration information. */
+/* Fill in the Intel Processor Trace configuration information. */
static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
conf->cpu = btrace_this_cpu ();
}
-/* Read branch trace data in Intel(R) Processor Trace format for the thread
+/* Read branch trace data in Intel Processor Trace format for the thread
given by TINFO into BTRACE using the TYPE reading method. */
static enum btrace_error
return linux_read_bts (&btrace->variant.bts, tinfo, type);
case BTRACE_FORMAT_PT:
- /* We read btrace in Intel(R) Processor Trace format. */
+ /* We read btrace in Intel Processor Trace format. */
btrace->format = BTRACE_FORMAT_PT;
btrace->variant.pt.data = NULL;
btrace->variant.pt.size = 0;