/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include "thread-stack.h"
#include "callchain.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	int have_sched_switch;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 branches_sample_type;
	bool sample_transactions;
	u64 transactions_sample_type;
	bool synth_needs_swap;
	unsigned max_non_turbo_ratio;
	unsigned long num_events;
};
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
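/*
 * Roughly, the switch_state values above describe how a queue keeps decoded
 * trace and context-switch information in step: a queue starts out UNKNOWN,
 * becomes TRACING once the running task is known, goes to
 * EXPECTING_SWITCH_IP when a sched_switch (or context switch) event has been
 * seen and the decoder is waiting for the branch to the kernel switch ip
 * before changing tid, and to EXPECTING_SWITCH_EVENT in the opposite case.
 * NOT_TRACING means the trace shows tracing has stopped.
 */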
struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	struct thread *thread;
};
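/*
 * Note: there is one intel_pt_queue per auxtrace queue, i.e. normally one
 * per cpu (or per thread when per-cpu mmaps are not used).  It carries the
 * decoder instance, the buffers used to synthesize samples, and the
 * thread/pid/tid currently attributed to the trace data of that queue.
 */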
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	intel_pt_dump(pt, buf, len);
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	if (b->use_size)
		b->use_data = start;
	return 0;
}
static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
					struct auxtrace_queue *queue,
					struct auxtrace_buffer *buffer)
{
	if (queue->cpu == -1 && buffer->cpu != -1)
		ptq->cpu = buffer->cpu;

	ptq->pid = buffer->pid;
	ptq->tid = buffer->tid;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid != -1) {
		if (ptq->pid != -1)
			ptq->thread = machine__findnew_thread(ptq->pt->machine,
							      ptq->pid,
							      ptq->tid);
		else
			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
							   ptq->tid);
	}
}
/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data_file__fd(ptq->pt->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
					ptq->tid != buffer->tid))
		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (!b->len)
		return intel_pt_get_trace(b, data);

	return 0;
}
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}
static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
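/*
 * Worked example for the sizing above: an 8 MiB dso with a cache divisor
 * of 64 gives size = 128 KiB = 0x20000; __builtin_clz(0x20000) is 14 for a
 * 32-bit unsigned value, so the function returns 32 - 14 = 18, which
 * intel_pt_cache() below passes to auxtrace_cache__new() as the number of
 * hash bits.
 */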
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[1024];
	size_t bufsz;
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	bufsz = intel_pt_insn_max_size();

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map, machine->symbol_filter);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf, bufsz);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}

		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}
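/*
 * Example for the conversion above: with tc.time_mult = 1000000 and
 * tc.time_shift = 10, ns = 2500000 gives quot = 2 and rem = 500000, so the
 * result is (2 << 10) + (500000 << 10) / 1000000 = 2048 + 512 = 2560 ticks.
 * Splitting into quotient and remainder gives the same value as
 * ns * 2^time_shift / time_mult while avoiding overflow of the intermediate
 * shift for large ns.
 */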
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
					INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
	}
}
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode) {
			if (pt->timeless_decoding)
				ptq->step_through_buffers = true;
			if (pt->timeless_decoding || !pt->have_sched_switch)
				ptq->use_buffer_pid_tid = true;
		}
	}

	if (!ptq->on_heap &&
	    (!pt->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}
static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}
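/*
 * Together, intel_pt_update_last_branch_rb() and
 * intel_pt_copy_last_branch_rb() maintain the synthesized branch stack:
 * updates fill last_branch_rb backwards from last_branch_pos, wrapping at
 * synth_opts.last_branch_sz, so the newest entry always sits at
 * last_branch_pos; the copy then unrolls the ring buffer into last_branch
 * newest-first, which is why it needs two memcpy() steps.
 */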
static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type,
				 bool swapped)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64 nr;
		struct branch_entry entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->branches_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->instructions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->transactions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_instructions &&
	    (state->type & INTEL_PT_INSTRUCTION) &&
	    (!pt->synth_opts.initial_skip ||
	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions &&
	    (state->type & INTEL_PT_TRANSACTION) &&
	    (!pt->synth_opts.initial_skip ||
	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!pt->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map, machine->symbol_filter))
		return 0;

	start = dso__first_symbol(map->dso, MAP__FUNCTION);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				pt->sync_switch = true;
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (pt->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				pt->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (pt->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}
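/*
 * The heap used above keeps the queues ordered by the timestamp of their
 * next sample, so decoding interleaves correctly across cpus: the queue
 * with the oldest data is popped, decoded only up to just past the next
 * queue's ordinal (heap_array[0].ordinal + 1, capped at the requested
 * timestamp), and then pushed back with its new timestamp.
 */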
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
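/*
 * Queues are normally created one per cpu and indexed in cpu order, so the
 * lookup above first tries queue_array[cpu] directly and only then scans
 * downwards and upwards for a queue whose cpu matches.
 */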
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}
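/*
 * Roughly: intel_pt_sync_switch() returns 0 when the tid change is deferred
 * until the trace itself reaches the kernel switch ip (next_tid is recorded
 * and the queue moves to INTEL_PT_SS_EXPECTING_SWITCH_IP), and 1 when the
 * caller should apply the context switch immediately via
 * machine__set_current_tid().
 */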
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
								   &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	free(pt);
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (pt->sampling_mode)
		return 0;

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};
static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}
static int intel_pt_synth_event(struct perf_session *session,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					   &id, intel_pt_event_synth);
}
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		pt->instructions_sample_period = attr.sample_period;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'instructions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 1;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'transactions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_transactions = true;
		pt->transactions_id = id;
		id += 1;
		evlist__for_each(evlist, evsel) {
			if (evsel->id && evsel->id[0] == pt->transactions_id) {
				zfree(&evsel->name);
				evsel->name = strdup("transactions");
				break;
			}
		}
	}

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
		attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
	}

	pt->synth_needs_swap = evsel->needs_swap;

	return 0;
}
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}
static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}
static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
};
*arr
, int start
, int finish
)
2027 for (i
= start
; i
<= finish
; i
++)
2028 fprintf(stdout
, intel_pt_info_fmts
[i
], arr
[i
]);
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					 min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	perf_config(intel_pt_perf_config, pt);

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (auxtrace_info->header.size >= sizeof(struct auxtrace_info_event) +
					  (sizeof(u64) * INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree not in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		pt->max_non_turbo_ratio = (tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	free(pt);
	return err;
}