+/* Determine the event type. */
+
+static int
+perf_event_pt_event_type ()
+{
+ static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";
+
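+ /* The sysfs file holds the dynamic PMU type the kernel assigned to the
+    intel_pt event source; this is the value perf_event_open expects in
+    perf_event_attr.type. */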
+ errno = 0;
+ gdb_file_up file = gdb_fopen_cloexec (filename, "r");
+ if (file.get () == nullptr)
+ error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
+
+ int type, found = fscanf (file.get (), "%d", &type);
+ if (found != 1)
+ error (_("Failed to read the PT event type from %s."), filename);
+
+ return type;
+}
+
+/* Enable branch tracing in Intel Processor Trace format. */
+
+static struct btrace_target_info *
+linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
+{
+ struct btrace_tinfo_pt *pt;
+ size_t pages;
+ int pid, pg;
+
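+ /* perf_event_open traces a single thread, identified by its kernel
+    thread id. Use the LWP and fall back to the pid if no LWP is set. */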
+ pid = ptid.lwp ();
+ if (pid == 0)
+ pid = ptid.pid ();
+
+ gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
+ (XCNEW (btrace_target_info));
+ tinfo->ptid = ptid;
+
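+ /* XCNEW returns zeroed memory, so any perf_event_attr fields we don't
+    set explicitly below default to zero. */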
+ tinfo->conf.format = BTRACE_FORMAT_PT;
+ pt = &tinfo->variant.pt;
+
+ pt->attr.size = sizeof (pt->attr);
+ pt->attr.type = perf_event_pt_event_type ();
+
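+ /* Restrict the trace to user space. */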
+ pt->attr.exclude_kernel = 1;
+ pt->attr.exclude_hv = 1;
+ pt->attr.exclude_idle = 1;
+
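+ /* glibc does not wrap perf_event_open, so call it via syscall. CPU -1
+    follows the thread across cpus, GROUP_FD -1 creates a new event
+    group, and no flags are needed. */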
+ errno = 0;
+ scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
+ if (fd.get () < 0)
+ diagnose_perf_event_open_fail ();
+
+ /* Map the perf user page. The kernel publishes the buffer layout and
+    the current data and AUX heads through it. */
+ scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd.get (), 0);
+ if (data.get () == MAP_FAILED)
+ error (_("Failed to map trace user page: %s."), safe_strerror (errno));
+
+ struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
+ data.get ();
+
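+ /* The AUX area is placed directly behind the data section. Its offset
+    and size must be written to the user page before the area can be
+    mapped. */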
+ header->aux_offset = header->data_offset + header->data_size;
+
+ /* Convert the requested size in bytes to pages (rounding up). */
+ pages = ((size_t) conf->size / PAGE_SIZE
+ + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
+ /* We need at least one page. */
+ if (pages == 0)
+ pages = 1;
+
+ /* The AUX buffer must span a power-of-two number of pages. Round PAGES
+    up to the next power of two by repeatedly adding the lowest set bit
+    until a single bit remains; a five-page request, for example, becomes
+    six, then eight pages. */
+ for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
+ if ((pages & ((size_t) 1 << pg)) != 0)
+ pages += ((size_t) 1 << pg);
+
+ /* Try to map the requested size. If that fails, e.g. because it
+    exceeds the locked-memory limit (RLIMIT_MEMLOCK), halve the size
+    and retry until the mapping succeeds or we run out of pages. */
+ scoped_mmap aux;
+ for (; pages > 0; pages >>= 1)
+ {
+ size_t length;
+ __u64 data_size;
+
+ data_size = (__u64) pages * PAGE_SIZE;
+
+ /* Don't ask for more than we can represent in the configuration; the
+    buffer size reported below is an unsigned int. */
+ if ((__u64) UINT_MAX < data_size)
+ continue;
+
+ length = (size_t) data_size;
+
+ /* Check that the cast to size_t did not truncate; size_t may be
+    narrower than __u64 on 32-bit hosts. */
+ if ((__u64) length != data_size)
+ continue;
+
+ header->aux_size = data_size;
+
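+ /* Map the AUX area read-only. A read-only mapping selects the
+    kernel's overwrite mode; we never update AUX_TAIL and only read the
+    trace data. */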
+ errno = 0;
+ aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
+ header->aux_offset);
+ if (aux.get () != MAP_FAILED)
+ break;
+ }
+
+ if (pages == 0)
+ error (_("Failed to map trace buffer: %s."), safe_strerror (errno));
+
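+ /* Success. Transfer ownership of the mappings and the event file
+    descriptor to the target info we hand back to the caller. */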
+ pt->pt.size = aux.size ();
+ pt->pt.mem = (const uint8_t *) aux.release ();
+ pt->pt.data_head = &header->aux_head;
+ pt->header = (struct perf_event_mmap_page *) data.release ();
+ gdb_assert (pt->header == header);
+ pt->file = fd.release ();
+
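+ /* Report the buffer size that was actually mapped; it may be smaller
+    than the size requested in CONF. */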
+ tinfo->conf.pt.size = (unsigned int) pt->pt.size;
+ return tinfo.release ();
+}
+
+#else /* !defined (PERF_ATTR_SIZE_VER5) */
+
+static struct btrace_target_info *
+linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
+{
+ error (_("Intel Processor Trace support was disabled at compile time."));