diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index 88ddc61278..b2c84c1708 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -1,6 +1,6 @@
 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
 
-   Copyright (C) 2013-2015 Free Software Foundation, Inc.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
 
    Contributed by Intel Corp. <markus.t.metzger@intel.com>
 
@@ -24,6 +24,9 @@
 #include "common-regcache.h"
 #include "gdb_wait.h"
 #include "x86-cpuid.h"
+#include "filestuff.h"
+
+#include <inttypes.h>
 
 #ifdef HAVE_SYS_SYSCALL_H
 #include <sys/syscall.h>
@@ -36,7 +39,6 @@
 #include "nat/gdb_ptrace.h"
 #include <sys/types.h>
 #include <signal.h>
-#include <sys/utsname.h>
 
 /* A branch trace record in perf_event.  */
 struct perf_event_bts
@@ -101,11 +103,6 @@ perf_event_new_data (const struct perf_event_buffer *pev)
   return *pev->data_head != pev->last_head;
 }
 
-/* Try to determine the size of a pointer in bits for the OS.
-
-   This is the same as the size of a pointer for the inferior process
-   except when a 32-bit inferior is running on a 64-bit OS.  */
-
 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a
    pointer to the memory holding the copy.
    The caller is responsible for freeing the memory.  */
@@ -122,15 +119,29 @@ perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
   if (size == 0)
     return NULL;
 
+  /* We should never ask for more data than the buffer can hold.  */
+  buffer_size = pev->size;
+  gdb_assert (size <= buffer_size);
+
+  /* If we ask for more data than we seem to have, we wrap around and read
+     data from the end of the buffer.  This is already handled by the %
+     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
+     don't underflow.
+
+     Note that this is perfectly OK for perf event buffers where data_head
+     doesn't grow indefinitely and instead wraps around to remain within the
+     buffer's boundaries.  */
+  if (data_head < size)
+    data_head += buffer_size;
+
   gdb_assert (size <= data_head);
   data_tail = data_head - size;
 
-  buffer_size = pev->size;
   begin = pev->mem;
   start = begin + data_tail % buffer_size;
   stop = begin + data_head % buffer_size;
 
-  buffer = xmalloc (size);
+  buffer = (gdb_byte *) xmalloc (size);
 
   if (start < stop)
     memcpy (buffer, start, stop - start);
@@ -156,10 +167,7 @@ perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
   __u64 data_head;
 
   data_head = *pev->data_head;
-
   size = pev->size;
-  if (data_head < size)
-    size = (size_t) data_head;
 
   *data = perf_event_read (pev, data_head, size);
   *psize = size;
@@ -189,56 +197,75 @@ perf_event_pt_event_type (int *type)
   return -1;
 }
 
-static int
-linux_determine_kernel_ptr_bits (void)
+/* Try to determine the start address of the Linux kernel.  */
+
+static uint64_t
+linux_determine_kernel_start (void)
 {
-  struct utsname utsn;
-  int errcode;
+  static uint64_t kernel_start;
+  static int cached;
+  FILE *file;
 
-  memset (&utsn, 0, sizeof (utsn));
+  if (cached != 0)
+    return kernel_start;
 
-  errcode = uname (&utsn);
-  if (errcode < 0)
-    return 0;
+  cached = 1;
 
-  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
-     the pointer size can be filled in later based on the inferior.  */
-  if (strcmp (utsn.machine, "x86_64") == 0)
-    return 64;
+  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
+  if (file == NULL)
+    return kernel_start;
 
-  return 0;
+  while (!feof (file))
+    {
+      char buffer[1024], symbol[8], *line;
+      uint64_t addr;
+      int match;
+
+      line = fgets (buffer, sizeof (buffer), file);
+      if (line == NULL)
+        break;
+
+      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
+      if (match != 2)
+        continue;
+
+      if (strcmp (symbol, "_text") == 0)
+        {
+          kernel_start = addr;
+          break;
+        }
+    }
+
+  fclose (file);
+
+  return kernel_start;
 }
 
 /* Check whether an address is in the kernel.  */
 
 static inline int
-perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
-                           uint64_t addr)
+perf_event_is_kernel_addr (uint64_t addr)
 {
-  uint64_t mask;
-
-  /* If we don't know the size of a pointer, we can't check.  Let's assume it's
-     not a kernel address in this case.  */
-  if (tinfo->ptr_bits == 0)
-    return 0;
+  uint64_t kernel_start;
 
-  /* A bit mask for the most significant bit in an address.  */
-  mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
+  kernel_start = linux_determine_kernel_start ();
+  if (kernel_start != 0ull)
+    return (addr >= kernel_start);
 
-  /* Check whether the most significant bit in the address is set.  */
-  return (addr & mask) != 0;
+  /* If we don't know the kernel's start address, let's check the most
+     significant bit.  This will work at least for 64-bit kernels.  */
+  return ((addr & (1ull << 63)) != 0);
 }
 
 /* Check whether a perf event record should be skipped.  */
 
 static inline int
-perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
-                            const struct perf_event_bts *bts)
+perf_event_skip_bts_record (const struct perf_event_bts *bts)
 {
   /* The hardware may report branches from kernel into user space.  Branches
      from user into kernel space will be suppressed.  We filter the former to
      provide a consistent branch trace excluding kernel.  */
-  return perf_event_is_kernel_addr (tinfo, bts->from);
+  return perf_event_is_kernel_addr (bts->from);
 }
 
 /* Perform a few consistency checks on a perf event sample record.  This is
@@ -335,7 +362,7 @@ perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
           break;
         }
 
-      if (perf_event_skip_bts_record (tinfo, &psample->bts))
+      if (perf_event_skip_bts_record (&psample->bts))
         continue;
 
       /* We found a valid sample, so we can complete the current block.  */
@@ -440,7 +467,7 @@ kernel_supports_bts (void)
     }
 }
 
-/* Check whether the kernel supports Intel(R) Processor Trace.  */
+/* Check whether the kernel supports Intel Processor Trace.  */
 
 static int
 kernel_supports_pt (void)
@@ -597,7 +624,7 @@ linux_supports_bts (void)
   return cached > 0;
 }
 
-/* Check whether the linux target supports Intel(R) Processor Trace.  */
+/* Check whether the linux target supports Intel Processor Trace.  */
 
 static int
 linux_supports_pt (void)
@@ -647,9 +674,8 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
   __u64 data_offset;
   int pid, pg;
 
-  tinfo = xzalloc (sizeof (*tinfo));
+  tinfo = XCNEW (struct btrace_target_info);
   tinfo->ptid = ptid;
-  tinfo->ptr_bits = linux_determine_kernel_ptr_bits ();
 
   tinfo->conf.format = BTRACE_FORMAT_BTS;
   bts = &tinfo->variant.bts;
@@ -709,7 +735,8 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
         continue;
 
       /* The number of pages we request needs to be a power of two.  */
-      header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
+      header = ((struct perf_event_mmap_page *)
+                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
       if (header != MAP_FAILED)
         break;
     }
@@ -758,7 +785,7 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
 
 #if defined (PERF_ATTR_SIZE_VER5)
 
-/* Enable branch tracing in Intel(R) Processor Trace format.  */
+/* Enable branch tracing in Intel Processor Trace format.  */
 
 static struct btrace_target_info *
 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
@@ -780,9 +807,8 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   if (pid == 0)
     pid = ptid_get_pid (ptid);
 
-  tinfo = xzalloc (sizeof (*tinfo));
+  tinfo = XCNEW (struct btrace_target_info);
   tinfo->ptid = ptid;
-  tinfo->ptr_bits = 0;
 
   tinfo->conf.format = BTRACE_FORMAT_PT;
   pt = &tinfo->variant.pt;
@@ -800,8 +826,9 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
     goto err;
 
   /* Allocate the configuration page.  */
-  header = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
-                 pt->file, 0);
+  header = ((struct perf_event_mmap_page *)
+            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+                  pt->file, 0));
   if (header == MAP_FAILED)
     goto err_file;
 
@@ -842,8 +869,9 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
       header->aux_size = data_size;
       length = size;
 
-      pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
-                         header->aux_offset);
+      pt->pt.mem = ((const uint8_t *)
+                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
+                          header->aux_offset));
       if (pt->pt.mem != MAP_FAILED)
         break;
     }
@@ -916,7 +944,7 @@ linux_disable_bts (struct btrace_tinfo_bts *tinfo)
   return BTRACE_ERR_NONE;
 }
 
-/* Disable Intel(R) Processor Trace tracing.  */
+/* Disable Intel Processor Trace tracing.  */
 
 static enum btrace_error
 linux_disable_pt (struct btrace_tinfo_pt *tinfo)
@@ -1051,7 +1079,7 @@ linux_read_bts (struct btrace_data_bts *btrace,
   return BTRACE_ERR_NONE;
 }
 
-/* Fill in the Intel(R) Processor Trace configuration information.  */
+/* Fill in the Intel Processor Trace configuration information.  */
 
 static void
 linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
@@ -1059,7 +1087,7 @@ linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
   conf->cpu = btrace_this_cpu ();
 }
 
-/* Read branch trace data in Intel(R) Processor Trace format for the thread
+/* Read branch trace data in Intel Processor Trace format for the thread
    given by TINFO into BTRACE using the TYPE reading method.  */
 
 static enum btrace_error
@@ -1113,7 +1141,7 @@ linux_read_btrace (struct btrace_data *btrace,
       return linux_read_bts (&btrace->variant.bts, tinfo, type);
 
     case BTRACE_FORMAT_PT:
-      /* We read btrace in Intel(R) Processor Trace format.  */
+      /* We read btrace in Intel Processor Trace format.  */
       btrace->format = BTRACE_FORMAT_PT;
       btrace->variant.pt.data = NULL;
       btrace->variant.pt.size = 0;
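
For illustration, here is a minimal standalone sketch of the wrap-around read that the new perf_event_read code above implements: copying the last SIZE bytes ending at DATA_HEAD out of a circular buffer whose head position may have wrapped past the end. The helper name ring_read and the demo values are illustrative only and are not part of the patch or of GDB; the real code additionally uses gdb_byte, xmalloc, and gdb_assert.

/* Sketch of the wrap-around read from perf_event_read above; all
   names here are illustrative, not GDB APIs.  */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy the last SIZE bytes ending at DATA_HEAD out of the circular
   buffer MEM of BUFFER_SIZE bytes into a freshly allocated buffer.  */
static uint8_t *
ring_read (const uint8_t *mem, uint64_t buffer_size, uint64_t data_head,
           uint64_t size)
{
  const uint8_t *start, *stop;
  uint8_t *buffer;
  uint64_t data_tail;

  if (size == 0)
    return NULL;

  /* Never ask for more data than the buffer can hold.  */
  assert (size <= buffer_size);

  /* If DATA_HEAD wrapped back into the buffer, shift it up by one full
     buffer so the subtraction below cannot underflow; the % BUFFER_SIZE
     operations below are unaffected by this shift.  */
  if (data_head < size)
    data_head += buffer_size;

  data_tail = data_head - size;

  start = mem + data_tail % buffer_size;
  stop = mem + data_head % buffer_size;

  buffer = malloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      /* The range wraps around the end of the buffer: copy the tail
         segment first, then the part at the beginning.  */
      memcpy (buffer, start, buffer_size - (start - mem));
      memcpy (buffer + buffer_size - (start - mem), mem, stop - mem);
    }

  return buffer;
}

int
main (void)
{
  /* An 8-byte ring; the last 4 bytes written before head position 10
     occupy slots 6, 7, 0, 1.  */
  const uint8_t mem[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
  uint8_t *out = ring_read (mem, sizeof (mem), 10, 4);

  fwrite (out, 1, 4, stdout);   /* prints GHAB */
  putchar ('\n');
  free (out);
  return 0;
}

In the demo, the head sits at position 10 of an 8-byte ring, so the last four bytes occupy slots 6, 7, 0, 1 and the program prints "GHAB". Shifting DATA_HEAD up by one full buffer before the subtraction is what keeps DATA_HEAD - SIZE from underflowing, which is exactly the fix the added lines in perf_event_read make.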