perf inject: Set branch stack feature flag when synthesizing branch stacks
tools/perf/util/intel-pt.c

/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../perf.h"
#include "session.h"
#include "machine.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool synth_needs_swap;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
};

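/*
 * State of the sync_switch machinery (see intel_pt_sync_switch() and
 * intel_pt_sample()). In outline:
 *   NOT_TRACING: a branch to ip 0 showed that tracing of the current task
 *                has stopped
 *   UNKNOWN: not yet known whether decode or the switch events are ahead
 *   TRACING: decode and context switch events are in step
 *   EXPECTING_SWITCH_EVENT: decode hit the kernel switch ip first, so decode
 *                           pauses until the matching switch event arrives
 *   EXPECTING_SWITCH_IP: the switch event arrived first, so the tid change
 *                        is deferred until decode reaches the switch ip
 */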
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

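/*
 * In snapshot mode, successive buffers can overlap. Trim buffer 'b' so that
 * its use_data/use_size start where the trace data of buffer 'a' left off.
 */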
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
					struct auxtrace_queue *queue,
					struct auxtrace_buffer *buffer)
{
	if (queue->cpu == -1 && buffer->cpu != -1)
		ptq->cpu = buffer->cpu;

	ptq->pid = buffer->pid;
	ptq->tid = buffer->tid;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid != -1) {
		if (ptq->pid != -1)
			ptq->thread = machine__findnew_thread(ptq->pt->machine,
							      ptq->pid,
							      ptq->tid);
		else
			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
							   ptq->tid);
	}
}

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data_file__fd(ptq->pt->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
					ptq->tid != buffer->tid))
		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (!b->len)
		return intel_pt_get_trace(b, data);

	return 0;
}

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

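/*
 * Choose the hash size (in bits) for a dso's instruction cache from the
 * dso's file size: in effect log2(size / intel-pt.cache-divisor), clamped
 * to the range [10, 21].
 */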
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

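/*
 * Decoder callback to walk instructions starting at *ip until a branch is
 * reached, to_ip is hit, or max_insn_cnt instructions have been walked.
 * Walks that complete within a single map are memoized in the dso's
 * auxtrace cache, keyed by the starting file offset, so the same stretch
 * of code need not be disassembled again.
 */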
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[1024];
	size_t bufsz;
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	bufsz = intel_pt_insn_max_size();

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map, machine->symbol_filter);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf, bufsz);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

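/*
 * Read the MTC period back out of the first Intel PT event's config, using
 * mtc_freq_bits (taken from the PMU format) to locate the field.
 */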
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

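/*
 * Convert nanoseconds to TSC ticks: the inverse of the time_mult/time_shift
 * conversion in 'pt->tc', with the quotient and remainder scaled separately
 * to avoid overflow.
 */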
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}

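/*
 * Translate the decoder state flags into perf branch flags: transaction
 * aborts, asynchronous branches (interrupts), trace begin/end and
 * in-transaction status.
 */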
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
	}
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode) {
			if (pt->timeless_decoding)
				ptq->step_through_buffers = true;
			if (pt->timeless_decoding || !pt->have_sched_switch)
				ptq->use_buffer_pid_tid = true;
		}
	}

	if (!ptq->on_heap &&
	    (!pt->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

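/*
 * For 'perf inject', re-pack the synthesized sample data into the event
 * record itself before it is delivered.
 */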
static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type,
				 bool swapped)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->branches_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->instructions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
		       ret);

	return ret;
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->transactions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
		       ret);

	return ret;
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

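/*
 * Synthesize whatever the current decoder state calls for: instruction,
 * transaction and branch samples, plus thread stack maintenance. When
 * sync_switch is in use, also drive the switch-state machine; returning 1
 * tells the caller to take this queue off the heap until the corresponding
 * switch event arrives.
 */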
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_instructions &&
	    (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions &&
	    (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (!pt->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}

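/*
 * Find the kernel's __switch_to address and, for ptss_ip, the address of
 * the symbol used to recognize a context switch in the trace:
 * perf_trace_sched_switch when using the sched_switch tracepoint, otherwise
 * __perf_event_task_sched_out.
 */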
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine, MAP__FUNCTION);
	if (!map)
		return 0;

	if (map__load(map, machine->symbol_filter))
		return 0;

	start = dso__first_symbol(map->dso, MAP__FUNCTION);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}

static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				pt->sync_switch = true;
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (pt->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				pt->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (pt->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}

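/*
 * Map a cpu number to its queue. Queues are normally allocated in cpu
 * order, so start at queue_array[cpu] (clamped to the last queue) and
 * scan downwards, then upwards.
 */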
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}

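/*
 * Feed a context switch into the queue's switch-state machine. Returns 0 if
 * the tid change was deferred until the switch ip is reached, 1 if the
 * caller should apply the tid change immediately, or a negative error code.
 */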
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
								   &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

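/*
 * Main event hook: convert the sample time to TSC and run the decoder for
 * all queues up to that point (or, when decoding without timestamps, run
 * the relevant queues on task exit); report truncated AUX data as trace
 * loss; and route sched_switch, context switch and itrace_start events to
 * keep the per-cpu tid tracking up to date.
 */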
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__delete(pt->unknown_thread);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (pt->sampling_mode)
		return 0;

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					   &id, intel_pt_event_synth);
}

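/*
 * Set up the event types that decoding will synthesize (instructions,
 * transactions, branches). Sample attributes are largely inherited from
 * the Intel PT event, and ids are allocated well above the existing ones
 * (evsel->id[0] + 1000000000).
 */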
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		pt->instructions_sample_period = attr.sample_period;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'instructions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 1;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'transactions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_transactions = true;
		pt->transactions_id = id;
		id += 1;
		evlist__for_each(evlist, evsel) {
			if (evsel->id && evsel->id[0] == pt->transactions_id) {
				if (evsel->name)
					zfree(&evsel->name);
				evsel->name = strdup("transactions");
				break;
			}
		}
	}

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
	}

	pt->synth_needs_swap = evsel->needs_swap;

	return 0;
}

static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

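/*
 * Parse the INTEL_PT auxtrace info: the priv[] array carries the PMU type,
 * TSC conversion parameters, config bits and feature flags recorded by
 * 'perf record'. The newer MTC, TSC:CTC ratio and CYC fields are read only
 * if the event is big enough to contain them, keeping compatibility with
 * older perf.data files.
 */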
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (auxtrace_info->header.size >= sizeof(struct auxtrace_info_event) +
					(sizeof(u64) * INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}
	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		pt->max_non_turbo_ratio = (tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__delete(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	free(pt);
	return err;
}