+/* Handle instruction decode events (libipt-v2). */
+
+static int
+handle_pt_insn_events (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ std::vector<unsigned int> &gaps, int status)
+{
+#if defined (HAVE_PT_INSN_EVENT)
+ while (status & pts_event_pending)
+ {
+ struct btrace_function *bfun;
+ struct pt_event event;
+ uint64_t offset;
+
+ status = pt_insn_event (decoder, &event, sizeof (event));
+ if (status < 0)
+ break;
+
+ switch (event.type)
+ {
+ default:
+ break;
+
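+ /* Tracing is disabled and re-enabled each time we enter the kernel.
+ An enabled event that does not resume from the instruction at which
+ tracing stopped indicates a trace gap, except when tracing just
+ started. */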
+ case ptev_enabled:
+ if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
+ {
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset);
+ }
+
+ break;
+
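+ /* Indicate trace overflows. */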
+ case ptev_overflow:
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
+ bfun->insn_offset - 1, offset);
+
+ break;
+ }
+ }
+#endif /* defined (HAVE_PT_INSN_EVENT) */
+
+ return status;
+}
+
+/* Handle events indicated by flags in INSN (libipt-v1). */
+
+static void
+handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ const struct pt_insn &insn,
+ std::vector<unsigned int> &gaps)
+{
+#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
+ /* Tracing is disabled and re-enabled each time we enter the kernel. Most
+ times, we continue from the same instruction we stopped before. This is
+ indicated via the RESUMED instruction flag. The ENABLED instruction flag
+ means that we continued from some other instruction. Indicate this as a
+ trace gap except when tracing just started. */
+ if (insn.enabled && !btinfo->functions.empty ())
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
+ insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
+
+#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
+}
+
+/* Add function branch trace to BTINFO using DECODER. */
+
+static void
+ftrace_add_pt (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ int *plevel,
+ std::vector<unsigned int> &gaps)
+{
+ struct btrace_function *bfun;
+ uint64_t offset;
+ int status;
+
+ for (;;)
+ {
+ /* Zero-initialize INSN; the decode-error warning below reads INSN.IP
+ even when pt_insn_next failed before filling it in. */
+ struct pt_insn insn = {};
+
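+ /* Synchronize onto the trace stream at the next synchronization
+ point (PSB packet). Each successful synchronization starts a new
+ decode pass over the following trace segment. */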
+ status = pt_insn_sync_forward (decoder);
+ if (status < 0)
+ {
+ if (status != -pte_eos)
+ warning (_("Failed to synchronize onto the Intel Processor "
+ "Trace stream: %s."), pt_errstr (pt_errcode (status)));
+ break;
+ }
+
+ for (;;)
+ {
+ /* Handle events from the previous iteration or synchronization. */
+ status = handle_pt_insn_events (btinfo, decoder, gaps, status);
+ if (status < 0)
+ break;
+
+ status = pt_insn_next (decoder, &insn, sizeof (insn));
+ if (status < 0)
+ break;
+
+ /* Handle events indicated by flags in INSN. */
+ handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
+
+ bfun = ftrace_update_function (btinfo, insn.ip);
+
+ /* Maintain the function level offset. */
+ *plevel = std::min (*plevel, bfun->level);
+
+ ftrace_update_insns (bfun, pt_btrace_insn (insn));
+ }
+
+ if (status == -pte_eos)
+ break;
+
+ /* Indicate the gap in the trace. */
+ bfun = ftrace_new_gap (btinfo, status, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
+ offset, insn.ip, pt_errstr (pt_errcode (status)));
+ }
+}
+
+/* A callback function to allow the trace decoder to read the inferior's
+ memory. */
+
+static int
+btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
+ const struct pt_asid *asid, uint64_t pc,
+ void *context)
+{
+ int result, errcode;
+
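+ /* The libipt read-memory callback convention: return the number of
+ bytes read on success or a negative pt_error_code on failure. */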
+ result = (int) size;
+ try
+ {
+ errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
+ if (errcode != 0)
+ result = -pte_nomap;
+ }
+ catch (const gdb_exception_error &error)
+ {
+ result = -pte_nomap;
+ }
+
+ return result;
+}
+
+/* Translate the vendor from one enum to another. */
+
+static enum pt_cpu_vendor
+pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
+{
+ switch (vendor)
+ {
+ default:
+ return pcv_unknown;
+
+ case CV_INTEL:
+ return pcv_intel;
+ }
+}
+
+/* Finalize the function branch trace after decode. */
+
+static void
+btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
+ struct thread_info *tp, int level)
+{
+ pt_insn_free_decoder (decoder);
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ tp->btrace.level = -level;
+
+ /* Add a single last instruction entry for the current PC.
+ This allows us to compute the backtrace at the current PC using both
+ standard unwind and btrace unwind.
+ This extra entry is ignored by all record commands. */
+ btrace_add_pc (tp);
+}
+
+/* Compute the function branch trace from Intel Processor Trace
+ format. */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ struct btrace_thread_info *btinfo;
+ struct pt_insn_decoder *decoder;
+ struct pt_config config;
+ int level, errcode;
+
+ if (btrace->size == 0)
+ return;
+
+ btinfo = &tp->btrace;
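+ /* Start from the existing level offset when extending a non-empty
+ trace so function levels stay consistent across fetches; otherwise
+ start at INT_MAX and let the decode pass lower it. */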
+ if (btinfo->functions.empty ())
+ level = INT_MAX;
+ else
+ level = -btinfo->level;
+
+ pt_config_init (&config);
+ config.begin = btrace->data;
+ config.end = btrace->data + btrace->size;
+
+ /* We treat an unknown vendor as 'no errata'. */
+ if (btrace->config.cpu.vendor != CV_UNKNOWN)
+ {
+ config.cpu.vendor
+ = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
+ config.cpu.family = btrace->config.cpu.family;
+ config.cpu.model = btrace->config.cpu.model;
+ config.cpu.stepping = btrace->config.cpu.stepping;
+
+ errcode = pt_cpu_errata (&config.errata, &config.cpu);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace "
+ "decoder: %s."), pt_errstr (pt_errcode (errcode)));
+ }
+
+ decoder = pt_insn_alloc_decoder (&config);
+ if (decoder == NULL)
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
+
+ try
+ {
+ struct pt_image *image;
+
+ image = pt_insn_get_image (decoder);
+ if (image == NULL)
+ error (_("Failed to configure the Intel Processor Trace decoder."));
+
+ errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
+ NULL);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: "
+ "%s."), pt_errstr (pt_errcode (errcode)));
+
+ ftrace_add_pt (btinfo, decoder, &level, gaps);
+ }
+ catch (const gdb_exception &error)
+ {
+ /* Indicate a gap in the trace if we quit trace processing. */
+ if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
+ ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+
+ throw;
+ }
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+}
+
+#else /* defined (HAVE_LIBIPT) */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
+}
+
+#endif /* defined (HAVE_LIBIPT) */
+
+/* Compute the function branch trace from a block branch trace BTRACE for
+ a thread given by TP. If CPU is not NULL, overwrite the cpu in the
+ branch trace configuration. This is currently only used for the PT
+ format. */
+
+static void
+btrace_compute_ftrace_1 (struct thread_info *tp,
+ struct btrace_data *btrace,
+ const struct btrace_cpu *cpu,
+ std::vector<unsigned int> &gaps)
+{
+ DEBUG ("compute ftrace");
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return;
+
+ case BTRACE_FORMAT_BTS:
+ btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
+ return;
+
+ case BTRACE_FORMAT_PT:
+ /* Overwrite the cpu we use for enabling errata workarounds. */
+ if (cpu != nullptr)
+ btrace->variant.pt.config.cpu = *cpu;
+
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
+ return;
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
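+/* Record the gaps from a trace decode in TP's branch trace and try to
+ bridge them. */
+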
+static void
+btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
+{
+ if (!gaps.empty ())
+ {
+ tp->btrace.ngaps += gaps.size ();
+ btrace_bridge_gaps (tp, gaps);
+ }
+}
+
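+/* Compute the function branch trace from BTRACE for TP, recording and
+ bridging gaps. If CPU is not NULL, overwrite the cpu in the branch
+ trace configuration. */
+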
+static void
+btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
+ const struct btrace_cpu *cpu)
+{
+ std::vector<unsigned int> gaps;
+
+ try
+ {
+ btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
+ }
+ catch (const gdb_exception &error)
+ {
+ btrace_finalize_ftrace (tp, gaps);
+
+ throw;
+ }
+
+ btrace_finalize_ftrace (tp, gaps);
+}
+
+/* Add an entry for the current PC. */
+
+static void
+btrace_add_pc (struct thread_info *tp)
+{
+ struct btrace_data btrace;
+ struct btrace_block *block;
+ struct regcache *regcache;
+ CORE_ADDR pc;
+
+ regcache = get_thread_regcache (tp);
+ pc = regcache_read_pc (regcache);
+
+ btrace.format = BTRACE_FORMAT_BTS;
+ btrace.variant.bts.blocks = NULL;
+
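+ /* A single block that begins and ends at PC decodes to exactly one
+ instruction: the one at the current PC. */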
+ block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
+ block->begin = pc;
+ block->end = pc;
+
+ btrace_compute_ftrace (tp, &btrace, NULL);
+}
+
+/* See btrace.h. */
+
+void
+btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
+{
+ if (tp->btrace.target != NULL)
+ return;
+
+#if !defined (HAVE_LIBIPT)
+ if (conf->format == BTRACE_FORMAT_PT)
+ error (_("Intel Processor Trace support was disabled at compile time."));
+#endif /* !defined (HAVE_LIBIPT) */
+
+ DEBUG ("enable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ tp->btrace.target = target_enable_btrace (tp->ptid, conf);
+
+ /* We're done if we failed to enable tracing. */
+ if (tp->btrace.target == NULL)
+ return;
+
+ /* We need to undo the enable in case of errors. */
+ try
+ {
+ /* Add an entry for the current PC so we start tracing from where we
+ enabled it.
+
+ If we can't access TP's registers, TP is most likely running. In this
+ case, we can't really say where tracing was enabled so it should be
+ safe to simply skip this step.
+
+ This is not relevant for BTRACE_FORMAT_PT since the trace will already
+ start at the PC at which tracing was enabled. */
+ if (conf->format != BTRACE_FORMAT_PT
+ && can_access_registers_thread (tp))
+ btrace_add_pc (tp);
+ }
+ catch (const gdb_exception &exception)
+ {
+ btrace_disable (tp);
+
+ throw;
+ }
+}
+
+/* See btrace.h. */
+
+const struct btrace_config *
+btrace_conf (const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->target == NULL)
+ return NULL;
+
+ return target_btrace_conf (btinfo->target);
+}
+
+/* See btrace.h. */
+
+void
+btrace_disable (struct thread_info *tp)