/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "session.h"
#include "machine.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)
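/*
 * Session-wide decoder state: one struct intel_pt hangs off the perf
 * session's auxtrace pointer.  It owns the decode queues (one per cpu, or
 * per thread), the min-heap used to interleave queues in timestamp order,
 * and the TSC <-> perf-time conversion parameters read from the
 * AUXTRACE_INFO event.
 */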
struct intel_pt {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
        struct auxtrace_heap heap;
        u32 auxtrace_type;
        struct perf_session *session;
        struct machine *machine;
        struct perf_evsel *switch_evsel;
        struct thread *unknown_thread;
        bool timeless_decoding;
        bool sampling_mode;
        bool snapshot_mode;
        bool per_cpu_mmaps;
        bool have_tsc;
        bool data_queued;
        bool est_tsc;
        bool sync_switch;
        bool mispred_all;
        int have_sched_switch;
        u32 pmu_type;
        u64 kernel_start;
        u64 switch_ip;
        u64 ptss_ip;

        struct perf_tsc_conversion tc;
        bool cap_user_time_zero;

        struct itrace_synth_opts synth_opts;

        bool sample_instructions;
        u64 instructions_sample_type;
        u64 instructions_sample_period;
        u64 instructions_id;

        bool sample_branches;
        u32 branches_filter;
        u64 branches_sample_type;
        u64 branches_id;

        bool sample_transactions;
        u64 transactions_sample_type;
        u64 transactions_id;

        bool synth_needs_swap;

        u64 tsc_bit;
        u64 mtc_bit;
        u64 mtc_freq_bits;
        u64 tsc_ctc_ratio_n;
        u64 tsc_ctc_ratio_d;
        u64 cyc_bit;
        u64 noretcomp_bit;
        unsigned max_non_turbo_ratio;

        unsigned long num_events;
};
enum switch_state {
        INTEL_PT_SS_NOT_TRACING,
        INTEL_PT_SS_UNKNOWN,
        INTEL_PT_SS_TRACING,
        INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
        INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
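/*
 * Per-queue decode state.  'buffer' tracks the current AUX trace buffer,
 * 'state' holds the decoder's last output, and 'last_branch' /
 * 'last_branch_rb' form the synthesized branch-stack ring buffer (see
 * intel_pt_update_last_branch_rb() below).
 */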
struct intel_pt_queue {
        struct intel_pt *pt;
        unsigned int queue_nr;
        struct auxtrace_buffer *buffer;
        struct intel_pt_decoder *decoder;
        const struct intel_pt_state *state;
        struct ip_callchain *chain;
        struct branch_stack *last_branch;
        struct branch_stack *last_branch_rb;
        size_t last_branch_pos;
        union perf_event *event_buf;
        bool on_heap;
        bool stop;
        bool step_through_buffers;
        bool use_buffer_pid_tid;
        pid_t pid, tid;
        int cpu;
        int switch_state;
        pid_t next_tid;
        struct thread *thread;
        bool exclude_kernel;
        bool have_sample;
        u64 time;
        u64 timestamp;
        u32 flags;
        u16 insn_len;
        u64 last_insn_cnt;
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
                          unsigned char *buf, size_t len)
{
        struct intel_pt_pkt packet;
        size_t pos = 0;
        int ret, pkt_len, i;
        char desc[INTEL_PT_PKT_DESC_MAX];
        const char *color = PERF_COLOR_BLUE;

        color_fprintf(stdout, color,
                      ". ... Intel Processor Trace data: size %zu bytes\n",
                      len);

        while (len) {
                ret = intel_pt_get_packet(buf, len, &packet);
                if (ret > 0)
                        pkt_len = ret;
                else
                        pkt_len = 1;
                printf(".");
                color_fprintf(stdout, color, "  %08x: ", pos);
                for (i = 0; i < pkt_len; i++)
                        color_fprintf(stdout, color, " %02x", buf[i]);
                for (; i < 16; i++)
                        color_fprintf(stdout, color, "   ");
                if (ret > 0) {
                        ret = intel_pt_pkt_desc(&packet, desc,
                                                INTEL_PT_PKT_DESC_MAX);
                        if (ret > 0)
                                color_fprintf(stdout, color, " %s\n", desc);
                } else {
                        color_fprintf(stdout, color, " Bad packet!\n");
                }
                pos += pkt_len;
                buf += pkt_len;
                len -= pkt_len;
        }
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
                                size_t len)
{
        printf(".\n");
        intel_pt_dump(pt, buf, len);
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
{
        void *start;

        start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
                                      pt->have_tsc);
        if (!start)
                return -EINVAL;
        b->use_size = b->data + b->size - start;
        b->use_data = start;
        return 0;
}
static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
                                        struct auxtrace_queue *queue,
                                        struct auxtrace_buffer *buffer)
{
        if (queue->cpu == -1 && buffer->cpu != -1)
                ptq->cpu = buffer->cpu;

        ptq->pid = buffer->pid;
        ptq->tid = buffer->tid;

        intel_pt_log("queue %u cpu %d pid %d tid %d\n",
                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

        thread__zput(ptq->thread);

        if (ptq->tid != -1) {
                if (ptq->pid != -1)
                        ptq->thread = machine__findnew_thread(ptq->pt->machine,
                                                              ptq->pid,
                                                              ptq->tid);
                else
                        ptq->thread = machine__find_thread(ptq->pt->machine, -1,
                                                           ptq->tid);
        }
}
/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
        struct intel_pt_queue *ptq = data;
        struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
        struct auxtrace_queue *queue;

        if (ptq->stop) {
                b->len = 0;
                return 0;
        }

        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

        buffer = auxtrace_buffer__next(queue, buffer);
        if (!buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                b->len = 0;
                return 0;
        }

        ptq->buffer = buffer;

        if (!buffer->data) {
                int fd = perf_data_file__fd(ptq->pt->session->file);

                buffer->data = auxtrace_buffer__get_data(buffer, fd);
                if (!buffer->data)
                        return -ENOMEM;
        }

        if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
            intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
                return -ENOMEM;

        if (old_buffer)
                auxtrace_buffer__drop_data(old_buffer);

        if (buffer->use_data) {
                b->len = buffer->use_size;
                b->buf = buffer->use_data;
        } else {
                b->len = buffer->size;
                b->buf = buffer->data;
        }
        b->ref_timestamp = buffer->reference;

        if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
                                                      !buffer->consecutive)) {
                b->consecutive = false;
                b->trace_nr = buffer->buffer_nr + 1;
        } else {
                b->consecutive = true;
        }

        if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
                                        ptq->tid != buffer->tid))
                intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

        if (ptq->step_through_buffers)
                ptq->stop = true;

        if (!b->len)
                return intel_pt_get_trace(b, data);

        return 0;
}
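/*
 * Decoded-instruction cache: walking object code one instruction at a time
 * is expensive, so the result of walking a block (instruction count, byte
 * count and the terminating branch) is cached per dso, keyed by file
 * offset, in an auxtrace_cache.
 */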
struct intel_pt_cache_entry {
        struct auxtrace_cache_entry entry;
        u64 insn_cnt;
        u64 byte_cnt;
        enum intel_pt_insn_op op;
        enum intel_pt_insn_branch branch;
        int length;
        int32_t rel;
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
        int *d = data;
        long val;

        if (!strcmp(var, "intel-pt.cache-divisor")) {
                val = strtol(value, NULL, 0);
                if (val > 0 && val <= INT_MAX)
                        *d = val;
        }

        return 0;
}
static int intel_pt_cache_divisor(void)
{
        static int d;

        if (d)
                return d;

        perf_config(intel_pt_config_div, &d);

        if (!d)
                d = 64;

        return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
                                        struct machine *machine)
{
        off_t size;

        size = dso__data_size(dso, machine);
        size /= intel_pt_cache_divisor();
        if (size < 1000)
                return 10;
        if (size > (1 << 21))
                return 21;
        return 32 - __builtin_clz(size);
}
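/*
 * Illustrative sizing (example numbers, not from the source): with the
 * default divisor of 64, a 256MiB dso gives size = 4MiB > 1 << 21, so the
 * cache is clamped to 21 bits;  a 64KiB dso gives size = 1KiB and
 * 32 - __builtin_clz(1024) = 11 bits;  anything below 1000 bytes falls
 * back to the 10-bit minimum.
 */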
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
                                             struct machine *machine)
{
        struct auxtrace_cache *c;
        unsigned int bits;

        if (dso->auxtrace_cache)
                return dso->auxtrace_cache;

        bits = intel_pt_cache_size(dso, machine);

        /* Ignoring cache creation failure */
        c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

        dso->auxtrace_cache = c;

        return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
                              u64 offset, u64 insn_cnt, u64 byte_cnt,
                              struct intel_pt_insn *intel_pt_insn)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
        struct intel_pt_cache_entry *e;
        int err;

        if (!c)
                return -ENOMEM;

        e = auxtrace_cache__alloc_entry(c);
        if (!e)
                return -ENOMEM;

        e->insn_cnt = insn_cnt;
        e->byte_cnt = byte_cnt;
        e->op = intel_pt_insn->op;
        e->branch = intel_pt_insn->branch;
        e->length = intel_pt_insn->length;
        e->rel = intel_pt_insn->rel;

        err = auxtrace_cache__add(c, offset, &e->entry);
        if (err)
                auxtrace_cache__free_entry(c, e);

        return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);

        if (!c)
                return NULL;

        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
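/*
 * Walk object code from *ip, decoding one instruction at a time, until a
 * branch is reached, 'to_ip' is hit, or 'max_insn_cnt' instructions have
 * been stepped over.  Whole-block results are looked up in, and added to,
 * the per-dso cache above.
 */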
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
                                   uint64_t *insn_cnt_ptr, uint64_t *ip,
                                   uint64_t to_ip, uint64_t max_insn_cnt,
                                   void *data)
{
        struct intel_pt_queue *ptq = data;
        struct machine *machine = ptq->pt->machine;
        struct thread *thread;
        struct addr_location al;
        unsigned char buf[1024];
        size_t bufsz;
        ssize_t len;
        int x86_64;
        u8 cpumode;
        u64 offset, start_offset, start_ip;
        u64 insn_cnt = 0;
        bool one_map = true;

        if (to_ip && *ip == to_ip)
                goto out_no_cache;

        bufsz = intel_pt_insn_max_size();

        if (*ip >= ptq->pt->kernel_start)
                cpumode = PERF_RECORD_MISC_KERNEL;
        else
                cpumode = PERF_RECORD_MISC_USER;

        thread = ptq->thread;
        if (!thread) {
                if (cpumode != PERF_RECORD_MISC_KERNEL)
                        return -EINVAL;
                thread = ptq->pt->unknown_thread;
        }

        while (1) {
                thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
                if (!al.map || !al.map->dso)
                        return -EINVAL;

                if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
                    dso__data_status_seen(al.map->dso,
                                          DSO_DATA_STATUS_SEEN_ITRACE))
                        return -ENOENT;

                offset = al.map->map_ip(al.map, *ip);

                if (!to_ip && one_map) {
                        struct intel_pt_cache_entry *e;

                        e = intel_pt_cache_lookup(al.map->dso, machine, offset);
                        if (e &&
                            (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
                                *insn_cnt_ptr = e->insn_cnt;
                                *ip += e->byte_cnt;
                                intel_pt_insn->op = e->op;
                                intel_pt_insn->branch = e->branch;
                                intel_pt_insn->length = e->length;
                                intel_pt_insn->rel = e->rel;
                                intel_pt_log_insn_no_data(intel_pt_insn, *ip);
                                return 0;
                        }
                }

                start_offset = offset;
                start_ip = *ip;

                /* Load maps to ensure dso->is_64_bit has been updated */
                map__load(al.map, machine->symbol_filter);

                x86_64 = al.map->dso->is_64_bit;

                while (1) {
                        len = dso__data_read_offset(al.map->dso, machine,
                                                    offset, buf, bufsz);
                        if (len <= 0)
                                return -EINVAL;

                        if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
                                return -EINVAL;

                        intel_pt_log_insn(intel_pt_insn, *ip);

                        insn_cnt += 1;

                        if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
                                goto out;

                        if (max_insn_cnt && insn_cnt >= max_insn_cnt)
                                goto out_no_cache;

                        *ip += intel_pt_insn->length;

                        if (to_ip && *ip == to_ip)
                                goto out_no_cache;

                        if (*ip >= al.map->end)
                                break;

                        offset += intel_pt_insn->length;
                }
                one_map = false;
        }
out:
        *insn_cnt_ptr = insn_cnt;

        if (!one_map)
                goto out_no_cache;

        /*
         * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
         * entries.
         */
        if (to_ip) {
                struct intel_pt_cache_entry *e;

                e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
                if (e)
                        return 0;
        }

        /* Ignore cache errors */
        intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
                           *ip - start_ip, intel_pt_insn);

        return 0;

out_no_cache:
        *insn_cnt_ptr = insn_cnt;
        return 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
                                struct perf_event_attr *attr, u64 *config)
{
        if (attr->type == pt->pmu_type) {
                if (config)
                        *config = attr->config;
                return true;
        }

        return false;
}
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return false;
        }
        return true;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        u64 config;

        if (!pt->noretcomp_bit)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & pt->noretcomp_bit))
                        return false;
        }
        return true;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        unsigned int shift;
        u64 config;

        if (!pt->mtc_freq_bits)
                return 0;

        for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
                config >>= 1;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config))
                        return (config & pt->mtc_freq_bits) >> shift;
        }
        return 0;
}
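/*
 * Example with illustrative values: if mtc_freq_bits is 0x3c00, the loop
 * above computes shift = 10, so a config value of 0x1400 yields an MTC
 * period of (0x1400 & 0x3c00) >> 10 = 5.
 */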
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool timeless_decoding = true;
        u64 config;

        if (!pt->tsc_bit || !pt->cap_user_time_zero)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
                        return true;
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                timeless_decoding = false;
                        else
                                return true;
                }
        }
        return timeless_decoding;
}
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return true;
        }
        return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool have_tsc = false;
        u64 config;

        if (!pt->tsc_bit)
                return false;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                have_tsc = true;
                        else
                                return false;
                }
        }
        return have_tsc;
}
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
        u64 quot, rem;

        quot = ns / pt->tc.time_mult;
        rem  = ns % pt->tc.time_mult;
        return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
                pt->tc.time_mult;
}
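/*
 * This approximates the inverse of the perf time conversion, i.e.
 * ticks = (ns << time_shift) / time_mult, split into quotient and
 * remainder by time_mult before shifting so precision is kept without
 * 128-bit arithmetic.  Illustrative numbers: with time_mult = 4 and
 * time_shift = 2, ns = 10 gives (2 << 2) + (2 << 2) / 4 = 10 ticks.
 */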
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
                                                   unsigned int queue_nr)
{
        struct intel_pt_params params = { .get_trace = 0, };
        struct intel_pt_queue *ptq;

        ptq = zalloc(sizeof(struct intel_pt_queue));
        if (!ptq)
                return NULL;

        if (pt->synth_opts.callchain) {
                size_t sz = sizeof(struct ip_callchain);

                sz += pt->synth_opts.callchain_sz * sizeof(u64);
                ptq->chain = zalloc(sz);
                if (!ptq->chain)
                        goto out_free;
        }

        if (pt->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);

                sz += pt->synth_opts.last_branch_sz *
                      sizeof(struct branch_entry);
                ptq->last_branch = zalloc(sz);
                if (!ptq->last_branch)
                        goto out_free;
                ptq->last_branch_rb = zalloc(sz);
                if (!ptq->last_branch_rb)
                        goto out_free;
        }

        ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!ptq->event_buf)
                goto out_free;

        ptq->pt = pt;
        ptq->queue_nr = queue_nr;
        ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
        ptq->pid = -1;
        ptq->tid = -1;
        ptq->cpu = -1;
        ptq->next_tid = -1;

        params.get_trace = intel_pt_get_trace;
        params.walk_insn = intel_pt_walk_next_insn;
        params.data = ptq;
        params.return_compression = intel_pt_return_compression(pt);
        params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
        params.mtc_period = intel_pt_mtc_period(pt);
        params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
        params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

        if (pt->synth_opts.instructions) {
                if (pt->synth_opts.period) {
                        switch (pt->synth_opts.period_type) {
                        case PERF_ITRACE_PERIOD_INSTRUCTIONS:
                                params.period_type =
                                                INTEL_PT_PERIOD_INSTRUCTIONS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_TICKS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_NANOSECS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = intel_pt_ns_to_ticks(pt,
                                                        pt->synth_opts.period);
                                break;
                        default:
                                break;
                        }
                }

                if (!params.period) {
                        params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
                        params.period = 1;
                }
        }

        ptq->decoder = intel_pt_decoder_new(&params);
        if (!ptq->decoder)
                goto out_free;

        return ptq;

out_free:
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
        return NULL;
}
static void intel_pt_free_queue(void *priv)
{
        struct intel_pt_queue *ptq = priv;

        if (!ptq)
                return;
        thread__zput(ptq->thread);
        intel_pt_decoder_free(ptq->decoder);
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
                                     struct auxtrace_queue *queue)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (queue->tid == -1 || pt->have_sched_switch) {
                ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
                thread__zput(ptq->thread);
        }

        if (!ptq->thread && ptq->tid != -1)
                ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

        if (ptq->thread) {
                ptq->pid = ptq->thread->pid_;
                if (queue->cpu == -1)
                        ptq->cpu = ptq->thread->cpu;
        }
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
        if (ptq->state->flags & INTEL_PT_ABORT_TX) {
                ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
        } else if (ptq->state->flags & INTEL_PT_ASYNC) {
                if (ptq->state->to_ip)
                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
                                     PERF_IP_FLAG_ASYNC |
                                     PERF_IP_FLAG_INTERRUPT;
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_END;
                ptq->insn_len = 0;
        } else {
                if (ptq->state->from_ip)
                        ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_BEGIN;
                if (ptq->state->flags & INTEL_PT_IN_TX)
                        ptq->flags |= PERF_IP_FLAG_IN_TX;
                ptq->insn_len = ptq->state->insn_len;
        }
}
static int intel_pt_setup_queue(struct intel_pt *pt,
                                struct auxtrace_queue *queue,
                                unsigned int queue_nr)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (list_empty(&queue->head))
                return 0;

        if (!ptq) {
                ptq = intel_pt_alloc_queue(pt, queue_nr);
                if (!ptq)
                        return -ENOMEM;
                queue->priv = ptq;

                if (queue->cpu != -1)
                        ptq->cpu = queue->cpu;
                ptq->tid = queue->tid;

                if (pt->sampling_mode) {
                        if (pt->timeless_decoding)
                                ptq->step_through_buffers = true;
                        if (pt->timeless_decoding || !pt->have_sched_switch)
                                ptq->use_buffer_pid_tid = true;
                }
        }

        if (!ptq->on_heap &&
            (!pt->sync_switch ||
             ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
                const struct intel_pt_state *state;
                int ret;

                if (pt->timeless_decoding)
                        return 0;

                intel_pt_log("queue %u getting timestamp\n", queue_nr);
                intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
                             queue_nr, ptq->cpu, ptq->pid, ptq->tid);
                while (1) {
                        state = intel_pt_decode(ptq->decoder);
                        if (state->err) {
                                if (state->err == INTEL_PT_ERR_NODATA) {
                                        intel_pt_log("queue %u has no timestamp\n",
                                                     queue_nr);
                                        return 0;
                                }
                                continue;
                        }
                        if (state->timestamp)
                                break;
                }

                ptq->timestamp = state->timestamp;
                intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
                             queue_nr, ptq->timestamp);
                ptq->state = state;
                ptq->have_sample = true;
                intel_pt_sample_flags(ptq);
                ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
                if (ret)
                        return ret;
                ptq->on_heap = true;
        }

        return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
        unsigned int i;
        int ret;

        for (i = 0; i < pt->queues.nr_queues; i++) {
                ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }
        return 0;
}
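/*
 * The last-branch ring buffer is written backwards: last_branch_pos walks
 * down towards 0 and wraps, so the newest entry sits at last_branch_pos.
 * Copying out therefore takes the tail [last_branch_pos, last_branch_sz)
 * first, then, once the buffer has wrapped, the head [0, last_branch_pos),
 * producing entries ordered newest to oldest as perf expects.
 */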
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
        struct branch_stack *bs_src = ptq->last_branch_rb;
        struct branch_stack *bs_dst = ptq->last_branch;
        size_t nr = 0;

        bs_dst->nr = bs_src->nr;

        if (!bs_src->nr)
                return;

        nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[ptq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
                memcpy(&bs_dst->entries[nr],
                       &bs_src->entries[0],
                       sizeof(struct branch_entry) * ptq->last_branch_pos);
        }
}
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
        ptq->last_branch_pos = 0;
        ptq->last_branch_rb->nr = 0;
}
static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct branch_stack *bs = ptq->last_branch_rb;
        struct branch_entry *be;

        if (!ptq->last_branch_pos)
                ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

        ptq->last_branch_pos -= 1;

        be              = &bs->entries[ptq->last_branch_pos];
        be->from        = state->from_ip;
        be->to          = state->to_ip;
        be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
        be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
        /* No support for mispredict */
        be->flags.mispred = ptq->pt->mispred_all;

        if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
                bs->nr += 1;
}
*event
,
957 struct perf_sample
*sample
, u64 type
,
960 event
->header
.size
= perf_event__sample_event_size(sample
, type
, 0);
961 return perf_event__synthesize_sample(event
, type
, 0, sample
, swapped
);
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
        int ret;
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct dummy_branch_stack {
                u64 nr;
                struct branch_entry entries;
        } dummy_bs;

        if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
                return 0;

        if (pt->synth_opts.initial_skip &&
            pt->num_events++ < pt->synth_opts.initial_skip)
                return 0;

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
        event->sample.header.size = sizeof(struct perf_event_header);

        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

        sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
        sample.addr = ptq->state->to_ip;
        sample.id = ptq->pt->branches_id;
        sample.stream_id = ptq->pt->branches_id;
        sample.period = 1;
        sample.cpu = ptq->cpu;
        sample.flags = ptq->flags;
        sample.insn_len = ptq->insn_len;

        /*
         * perf report cannot handle events without a branch stack when using
         * SORT_MODE__BRANCH so make a dummy one.
         */
        if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
                dummy_bs = (struct dummy_branch_stack){
                        .nr = 1,
                        .entries = {
                                .from = sample.ip,
                                .to = sample.addr,
                        },
                };
                sample.branch_stack = (struct branch_stack *)&dummy_bs;
        }

        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
                                            pt->branches_sample_type,
                                            pt->synth_needs_swap);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(pt->session, event, &sample);
        if (ret)
                pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
                       ret);

        return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
        int ret;
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (pt->synth_opts.initial_skip &&
            pt->num_events++ < pt->synth_opts.initial_skip)
                return 0;

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
        event->sample.header.size = sizeof(struct perf_event_header);

        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

        sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
        sample.addr = ptq->state->to_ip;
        sample.id = ptq->pt->instructions_id;
        sample.stream_id = ptq->pt->instructions_id;
        sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
        sample.cpu = ptq->cpu;
        sample.flags = ptq->flags;
        sample.insn_len = ptq->insn_len;

        ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

        if (pt->synth_opts.callchain) {
                thread_stack__sample(ptq->thread, ptq->chain,
                                     pt->synth_opts.callchain_sz, sample.ip);
                sample.callchain = ptq->chain;
        }

        if (pt->synth_opts.last_branch) {
                intel_pt_copy_last_branch_rb(ptq);
                sample.branch_stack = ptq->last_branch;
        }

        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
                                            pt->instructions_sample_type,
                                            pt->synth_needs_swap);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(pt->session, event, &sample);
        if (ret)
                pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
                       ret);

        if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);

        return ret;
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
        int ret;
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (pt->synth_opts.initial_skip &&
            pt->num_events++ < pt->synth_opts.initial_skip)
                return 0;

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
        event->sample.header.size = sizeof(struct perf_event_header);

        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

        sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
        sample.addr = ptq->state->to_ip;
        sample.id = ptq->pt->transactions_id;
        sample.stream_id = ptq->pt->transactions_id;
        sample.period = 1;
        sample.cpu = ptq->cpu;
        sample.flags = ptq->flags;
        sample.insn_len = ptq->insn_len;

        if (pt->synth_opts.callchain) {
                thread_stack__sample(ptq->thread, ptq->chain,
                                     pt->synth_opts.callchain_sz, sample.ip);
                sample.callchain = ptq->chain;
        }

        if (pt->synth_opts.last_branch) {
                intel_pt_copy_last_branch_rb(ptq);
                sample.branch_stack = ptq->last_branch;
        }

        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
                                            pt->transactions_sample_type,
                                            pt->synth_needs_swap);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(pt->session, event, &sample);
        if (ret)
                pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
                       ret);

        if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);

        return ret;
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
                                pid_t pid, pid_t tid, u64 ip)
{
        union perf_event event;
        char msg[MAX_AUXTRACE_ERROR_MSG];
        int err;

        intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

        auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
                             code, cpu, pid, tid, ip, msg);

        err = perf_session__deliver_synth_event(pt->session, &event, NULL);
        if (err)
                pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
                       err);

        return err;
}
*pt
, struct intel_pt_queue
*ptq
)
1176 struct auxtrace_queue
*queue
;
1177 pid_t tid
= ptq
->next_tid
;
1183 intel_pt_log("switch: cpu %d tid %d\n", ptq
->cpu
, tid
);
1185 err
= machine__set_current_tid(pt
->machine
, ptq
->cpu
, -1, tid
);
1187 queue
= &pt
->queues
.queue_array
[ptq
->queue_nr
];
1188 intel_pt_set_pid_tid_cpu(pt
, queue
);
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
        struct intel_pt *pt = ptq->pt;

        return ip == pt->switch_ip &&
               (ptq->flags & PERF_IP_FLAG_BRANCH) &&
               !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
                               PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
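/*
 * Decode-side half of the sched_switch synchronisation state machine: a
 * branch that lands on the kernel's __switch_to (pt->switch_ip) tells the
 * decoder a context switch is underway, and the switch_state values track
 * whether the matching sched_switch event or the switch ip is still
 * expected.
 */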
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct intel_pt *pt = ptq->pt;
        int err;

        if (!ptq->have_sample)
                return 0;

        ptq->have_sample = false;

        if (pt->sample_instructions &&
            (state->type & INTEL_PT_INSTRUCTION) &&
            (!pt->synth_opts.initial_skip ||
             pt->num_events++ >= pt->synth_opts.initial_skip)) {
                err = intel_pt_synth_instruction_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->sample_transactions &&
            (state->type & INTEL_PT_TRANSACTION) &&
            (!pt->synth_opts.initial_skip ||
             pt->num_events++ >= pt->synth_opts.initial_skip)) {
                err = intel_pt_synth_transaction_sample(ptq);
                if (err)
                        return err;
        }

        if (!(state->type & INTEL_PT_BRANCH))
                return 0;

        if (pt->synth_opts.callchain)
                thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
                                    state->to_ip, ptq->insn_len,
                                    state->trace_nr);
        else
                thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

        if (pt->sample_branches) {
                err = intel_pt_synth_branch_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->synth_opts.last_branch)
                intel_pt_update_last_branch_rb(ptq);

        if (!pt->sync_switch)
                return 0;

        if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
                switch (ptq->switch_state) {
                case INTEL_PT_SS_UNKNOWN:
                case INTEL_PT_SS_EXPECTING_SWITCH_IP:
                        err = intel_pt_next_tid(pt, ptq);
                        if (err)
                                return err;
                        ptq->switch_state = INTEL_PT_SS_TRACING;
                        break;
                default:
                        ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
                        return 1;
                }
        } else if (!state->to_ip) {
                ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
        } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
                ptq->switch_state = INTEL_PT_SS_UNKNOWN;
        } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
                   state->to_ip == pt->ptss_ip &&
                   (ptq->flags & PERF_IP_FLAG_CALL)) {
                ptq->switch_state = INTEL_PT_SS_TRACING;
        }

        return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
        struct machine *machine = pt->machine;
        struct map *map;
        struct symbol *sym, *start;
        u64 ip, switch_ip = 0;
        const char *ptss;

        if (ptss_ip)
                *ptss_ip = 0;

        map = machine__kernel_map(machine);
        if (!map)
                return 0;

        if (map__load(map, machine->symbol_filter))
                return 0;

        start = dso__first_symbol(map->dso, MAP__FUNCTION);

        for (sym = start; sym; sym = dso__next_symbol(sym)) {
                if (sym->binding == STB_GLOBAL &&
                    !strcmp(sym->name, "__switch_to")) {
                        ip = map->unmap_ip(map, sym->start);
                        if (ip >= map->start && ip < map->end) {
                                switch_ip = ip;
                                break;
                        }
                }
        }

        if (!switch_ip || !ptss_ip)
                return 0;

        if (pt->have_sched_switch == 1)
                ptss = "perf_trace_sched_switch";
        else
                ptss = "__perf_event_task_sched_out";

        for (sym = start; sym; sym = dso__next_symbol(sym)) {
                if (!strcmp(sym->name, ptss)) {
                        ip = map->unmap_ip(map, sym->start);
                        if (ip >= map->start && ip < map->end) {
                                *ptss_ip = ip;
                                break;
                        }
                }
        }

        return switch_ip;
}
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
        const struct intel_pt_state *state = ptq->state;
        struct intel_pt *pt = ptq->pt;
        int err;

        if (!pt->kernel_start) {
                pt->kernel_start = machine__kernel_start(pt->machine);
                if (pt->per_cpu_mmaps &&
                    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
                    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
                    !pt->sampling_mode) {
                        pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
                        if (pt->switch_ip) {
                                intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
                                             pt->switch_ip, pt->ptss_ip);
                                pt->sync_switch = true;
                        }
                }
        }

        intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
        while (1) {
                err = intel_pt_sample(ptq);
                if (err)
                        return err;

                state = intel_pt_decode(ptq->decoder);
                if (state->err) {
                        if (state->err == INTEL_PT_ERR_NODATA)
                                return 1;
                        if (pt->sync_switch &&
                            state->from_ip >= pt->kernel_start) {
                                pt->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
                        if (pt->synth_opts.errors) {
                                err = intel_pt_synth_error(pt, state->err,
                                                           ptq->cpu, ptq->pid,
                                                           ptq->tid,
                                                           state->from_ip);
                                if (err)
                                        return err;
                        }
                        continue;
                }

                ptq->state = state;
                ptq->have_sample = true;
                intel_pt_sample_flags(ptq);

                /* Use estimated TSC upon return to user space */
                if (pt->est_tsc &&
                    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
                    state->to_ip && state->to_ip < pt->kernel_start) {
                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
                                     state->timestamp, state->est_timestamp);
                        ptq->timestamp = state->est_timestamp;
                /* Use estimated TSC in unknown switch state */
                } else if (pt->sync_switch &&
                           ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
                           intel_pt_is_switch_ip(ptq, state->to_ip) &&
                           ptq->next_tid == -1) {
                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
                                     state->timestamp, state->est_timestamp);
                        ptq->timestamp = state->est_timestamp;
                } else if (state->timestamp > ptq->timestamp) {
                        ptq->timestamp = state->timestamp;
                }

                if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
                        *timestamp = ptq->timestamp;
                        return 0;
                }
        }
        return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
        if (pt->queues.new_data) {
                pt->queues.new_data = false;
                return intel_pt_setup_queues(pt);
        }
        return 0;
}
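/*
 * Queues are decoded in timestamp order: the queue with the smallest
 * ordinal (next timestamp) is popped from the min-heap, run until it
 * passes the next-smallest ordinal, then pushed back with its new
 * timestamp.
 */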
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
        unsigned int queue_nr;
        u64 ts;
        int ret;

        while (1) {
                struct auxtrace_queue *queue;
                struct intel_pt_queue *ptq;

                if (!pt->heap.heap_cnt)
                        return 0;

                if (pt->heap.heap_array[0].ordinal >= timestamp)
                        return 0;

                queue_nr = pt->heap.heap_array[0].queue_nr;
                queue = &pt->queues.queue_array[queue_nr];
                ptq = queue->priv;

                intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
                             queue_nr, pt->heap.heap_array[0].ordinal,
                             timestamp);

                auxtrace_heap__pop(&pt->heap);

                if (pt->heap.heap_cnt) {
                        ts = pt->heap.heap_array[0].ordinal + 1;
                        if (ts > timestamp)
                                ts = timestamp;
                } else {
                        ts = timestamp;
                }

                intel_pt_set_pid_tid_cpu(pt, queue);

                ret = intel_pt_run_decoder(ptq, &ts);

                if (ret < 0) {
                        auxtrace_heap__add(&pt->heap, queue_nr, ts);
                        return ret;
                }

                if (!ret) {
                        ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
                        if (ret < 0)
                                return ret;
                } else {
                        ptq->on_heap = false;
                }
        }

        return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
                                            u64 time_)
{
        struct auxtrace_queues *queues = &pt->queues;
        unsigned int i;
        u64 ts = 0;

        for (i = 0; i < queues->nr_queues; i++) {
                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
                struct intel_pt_queue *ptq = queue->priv;

                if (ptq && (tid == -1 || ptq->tid == tid)) {
                        ptq->time = time_;
                        intel_pt_set_pid_tid_cpu(pt, queue);
                        intel_pt_run_decoder(ptq, &ts);
                }
        }
        return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
        return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
                                    sample->pid, sample->tid, 0);
}
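/*
 * Find the queue for a cpu.  Queue numbers normally follow cpu numbers, so
 * start at queue 'cpu' (clamped to the last queue) and scan downwards,
 * then upwards, rather than always searching the whole array.
 */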
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
        unsigned i, j;

        if (cpu < 0 || !pt->queues.nr_queues)
                return NULL;

        if ((unsigned)cpu >= pt->queues.nr_queues)
                i = pt->queues.nr_queues - 1;
        else
                i = cpu;

        if (pt->queues.queue_array[i].cpu == cpu)
                return pt->queues.queue_array[i].priv;

        for (j = 0; i > 0; j++) {
                if (pt->queues.queue_array[--i].cpu == cpu)
                        return pt->queues.queue_array[i].priv;
        }

        for (; j < pt->queues.nr_queues; j++) {
                if (pt->queues.queue_array[j].cpu == cpu)
                        return pt->queues.queue_array[j].priv;
        }

        return NULL;
}
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
                                u64 timestamp)
{
        struct intel_pt_queue *ptq;
        int err;

        if (!pt->sync_switch)
                return 1;

        ptq = intel_pt_cpu_to_ptq(pt, cpu);
        if (!ptq)
                return 1;

        switch (ptq->switch_state) {
        case INTEL_PT_SS_NOT_TRACING:
                ptq->next_tid = -1;
                break;
        case INTEL_PT_SS_UNKNOWN:
        case INTEL_PT_SS_TRACING:
                ptq->next_tid = tid;
                ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
                return 0;
        case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
                if (!ptq->on_heap) {
                        ptq->timestamp = perf_time_to_tsc(timestamp,
                                                          &pt->tc);
                        err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
                                                 ptq->timestamp);
                        if (err)
                                return err;
                        ptq->on_heap = true;
                }
                ptq->switch_state = INTEL_PT_SS_TRACING;
                break;
        case INTEL_PT_SS_EXPECTING_SWITCH_IP:
                ptq->next_tid = tid;
                intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
                break;
        default:
                break;
        }

        return 1;
}
static int intel_pt_process_switch(struct intel_pt *pt,
                                   struct perf_sample *sample)
{
        struct perf_evsel *evsel;
        pid_t tid;
        int cpu, ret;

        evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
        if (evsel != pt->switch_evsel)
                return 0;

        tid = perf_evsel__intval(evsel, sample, "next_pid");
        cpu = sample->cpu;

        intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
                     &pt->tc));

        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
        if (ret <= 0)
                return ret;

        return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
                                   struct perf_sample *sample)
{
        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
        pid_t pid, tid;
        int cpu, ret;

        cpu = sample->cpu;

        if (pt->have_sched_switch == 3) {
                if (!out)
                        return 0;
                if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
                        pr_err("Expecting CPU-wide context switch event\n");
                        return -EINVAL;
                }
                pid = event->context_switch.next_prev_pid;
                tid = event->context_switch.next_prev_tid;
        } else {
                if (out)
                        return 0;
                pid = sample->pid;
                tid = sample->tid;
        }

        if (tid == -1) {
                pr_err("context_switch event has no tid\n");
                return -EINVAL;
        }

        intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
                     &pt->tc));

        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
        if (ret <= 0)
                return ret;

        return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
                                         union perf_event *event,
                                         struct perf_sample *sample)
{
        if (!pt->per_cpu_mmaps)
                return 0;

        intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     sample->cpu, event->itrace_start.pid,
                     event->itrace_start.tid, sample->time,
                     perf_time_to_tsc(sample->time, &pt->tc));

        return machine__set_current_tid(pt->machine, sample->cpu,
                                        event->itrace_start.pid,
                                        event->itrace_start.tid);
}
static int intel_pt_process_event(struct perf_session *session,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct perf_tool *tool)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        u64 timestamp;
        int err = 0;

        if (dump_trace)
                return 0;

        if (!tool->ordered_events) {
                pr_err("Intel Processor Trace requires ordered events\n");
                return -EINVAL;
        }

        if (sample->time && sample->time != (u64)-1)
                timestamp = perf_time_to_tsc(sample->time, &pt->tc);
        else
                timestamp = 0;

        if (timestamp || pt->timeless_decoding) {
                err = intel_pt_update_queues(pt);
                if (err)
                        return err;
        }

        if (pt->timeless_decoding) {
                if (event->header.type == PERF_RECORD_EXIT) {
                        err = intel_pt_process_timeless_queues(pt,
                                                               event->fork.tid,
                                                               sample->time);
                }
        } else if (timestamp) {
                err = intel_pt_process_queues(pt, timestamp);
        }
        if (err)
                return err;

        if (event->header.type == PERF_RECORD_AUX &&
            (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
            pt->synth_opts.errors) {
                err = intel_pt_lost(pt, sample);
                if (err)
                        return err;
        }

        if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
                err = intel_pt_process_switch(pt, sample);
        else if (event->header.type == PERF_RECORD_ITRACE_START)
                err = intel_pt_process_itrace_start(pt, event, sample);
        else if (event->header.type == PERF_RECORD_SWITCH ||
                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
                err = intel_pt_context_switch(pt, event, sample);

        intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
                     perf_event__name(event->header.type), event->header.type,
                     sample->cpu, sample->time, timestamp);

        return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        int ret;

        if (dump_trace)
                return 0;

        if (!tool->ordered_events)
                return -EINVAL;

        ret = intel_pt_update_queues(pt);
        if (ret < 0)
                return ret;

        if (pt->timeless_decoding)
                return intel_pt_process_timeless_queues(pt, -1,
                                                        MAX_TIMESTAMP - 1);

        return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        struct auxtrace_queues *queues = &pt->queues;
        unsigned int i;

        for (i = 0; i < queues->nr_queues; i++) {
                intel_pt_free_queue(queues->queue_array[i].priv);
                queues->queue_array[i].priv = NULL;
        }
        intel_pt_log_disable();
        auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);

        auxtrace_heap__free(&pt->heap);
        intel_pt_free_events(session);
        session->auxtrace = NULL;
        thread__put(pt->unknown_thread);
        free(pt);
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
                                           union perf_event *event,
                                           struct perf_tool *tool __maybe_unused)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);

        if (pt->sampling_mode)
                return 0;

        if (!pt->data_queued) {
                struct auxtrace_buffer *buffer;
                off_t data_offset;
                int fd = perf_data_file__fd(session->file);
                int err;

                if (perf_data_file__is_pipe(session->file)) {
                        data_offset = 0;
                } else {
                        data_offset = lseek(fd, 0, SEEK_CUR);
                        if (data_offset == -1)
                                return -errno;
                }

                err = auxtrace_queues__add_event(&pt->queues, session, event,
                                                 data_offset, &buffer);
                if (err)
                        return err;

                /* Dump here now we have copied a piped trace out of the pipe */
                if (dump_trace) {
                        if (auxtrace_buffer__get_data(buffer, fd)) {
                                intel_pt_dump_event(pt, buffer->data,
                                                    buffer->size);
                                auxtrace_buffer__put_data(buffer);
                        }
                }
        }

        return 0;
}
struct intel_pt_synth {
        struct perf_tool dummy_tool;
        struct perf_session *session;
};
static int intel_pt_event_synth(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample __maybe_unused,
                                struct machine *machine __maybe_unused)
{
        struct intel_pt_synth *intel_pt_synth =
                        container_of(tool, struct intel_pt_synth, dummy_tool);

        return perf_session__deliver_synth_event(intel_pt_synth->session, event,
                                                 NULL);
}
static int intel_pt_synth_event(struct perf_session *session,
                                struct perf_event_attr *attr, u64 id)
{
        struct intel_pt_synth intel_pt_synth;

        memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
        intel_pt_synth.session = session;

        return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
                                           &id, intel_pt_event_synth);
}
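/*
 * Synthesized events reuse the attributes of the selected Intel PT event
 * and take ids offset by 1000000000, which this file assumes is well clear
 * of the ids perf allocated for real events.
 */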
static int intel_pt_synth_events(struct intel_pt *pt,
                                 struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        bool found = false;
        u64 id;
        int err;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == pt->pmu_type && evsel->ids) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_debug("There are no selected events with Intel Processor Trace data\n");
                return 0;
        }

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.size = sizeof(struct perf_event_attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_PERIOD;
        if (pt->timeless_decoding)
                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
        else
                attr.sample_type |= PERF_SAMPLE_TIME;
        if (!pt->per_cpu_mmaps)
                attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
        attr.exclude_user = evsel->attr.exclude_user;
        attr.exclude_kernel = evsel->attr.exclude_kernel;
        attr.exclude_hv = evsel->attr.exclude_hv;
        attr.exclude_host = evsel->attr.exclude_host;
        attr.exclude_guest = evsel->attr.exclude_guest;
        attr.sample_id_all = evsel->attr.sample_id_all;
        attr.read_format = evsel->attr.read_format;

        id = evsel->id[0] + 1000000000;
        if (!id)
                id = 1;

        if (pt->synth_opts.instructions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
                        attr.sample_period =
                                intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
                else
                        attr.sample_period = pt->synth_opts.period;
                pt->instructions_sample_period = attr.sample_period;
                if (pt->synth_opts.callchain)
                        attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
                if (pt->synth_opts.last_branch)
                        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
                if (err) {
                        pr_err("%s: failed to synthesize 'instructions' event type\n",
                               __func__);
                        return err;
                }
                pt->sample_instructions = true;
                pt->instructions_sample_type = attr.sample_type;
                pt->instructions_id = id;
                id += 1;
        }

        if (pt->synth_opts.transactions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                attr.sample_period = 1;
                if (pt->synth_opts.callchain)
                        attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
                if (pt->synth_opts.last_branch)
                        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
                if (err) {
                        pr_err("%s: failed to synthesize 'transactions' event type\n",
                               __func__);
                        return err;
                }
                pt->sample_transactions = true;
                pt->transactions_id = id;
                id += 1;
                evlist__for_each_entry(evlist, evsel) {
                        if (evsel->id && evsel->id[0] == pt->transactions_id) {
                                if (evsel->name)
                                        zfree(&evsel->name);
                                evsel->name = strdup("transactions");
                                break;
                        }
                }
        }

        if (pt->synth_opts.branches) {
                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
                attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
                if (err) {
                        pr_err("%s: failed to synthesize 'branches' event type\n",
                               __func__);
                        return err;
                }
                pt->sample_branches = true;
                pt->branches_sample_type = attr.sample_type;
                pt->branches_id = id;
        }

        pt->synth_needs_swap = evsel->needs_swap;

        return 0;
}
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry_reverse(evlist, evsel) {
                const char *name = perf_evsel__name(evsel);

                if (!strcmp(name, "sched:sched_switch"))
                        return evsel;
        }

        return NULL;
}
static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.context_switch)
                        return true;
        }

        return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
        struct intel_pt *pt = data;

        if (!strcmp(var, "intel-pt.mispred-all"))
                pt->mispred_all = perf_config_bool(var, value);

        return 0;
}
static const char * const intel_pt_info_fmts[] = {
        [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
        [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
        [INTEL_PT_TIME_MULT]            = "  Time Multiplier     %"PRIu64"\n",
        [INTEL_PT_TIME_ZERO]            = "  Time Zero           %"PRIu64"\n",
        [INTEL_PT_CAP_USER_TIME_ZERO]   = "  Cap Time Zero       %"PRId64"\n",
        [INTEL_PT_TSC_BIT]              = "  TSC bit             %#"PRIx64"\n",
        [INTEL_PT_NORETCOMP_BIT]        = "  NoRETComp bit       %#"PRIx64"\n",
        [INTEL_PT_HAVE_SCHED_SWITCH]    = "  Have sched_switch   %"PRId64"\n",
        [INTEL_PT_SNAPSHOT_MODE]        = "  Snapshot mode       %"PRId64"\n",
        [INTEL_PT_PER_CPU_MMAPS]        = "  Per-cpu maps        %"PRId64"\n",
        [INTEL_PT_MTC_BIT]              = "  MTC bit             %#"PRIx64"\n",
        [INTEL_PT_TSC_CTC_N]            = "  TSC:CTC numerator   %"PRIu64"\n",
        [INTEL_PT_TSC_CTC_D]            = "  TSC:CTC denominator %"PRIu64"\n",
        [INTEL_PT_CYC_BIT]              = "  CYC bit             %#"PRIx64"\n",
};
static void intel_pt_print_info(u64 *arr, int start, int finish)
{
        int i;

        if (!dump_trace)
                return;

        for (i = start; i <= finish; i++)
                fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}
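/*
 * Entry point, called when the AUXTRACE_INFO event is processed: validate
 * the info event, populate struct intel_pt from its priv[] array, hook the
 * auxtrace callbacks into the session, choose how context switches will be
 * tracked, and synthesize the requested event types.
 */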
int intel_pt_process_auxtrace_info(union perf_event *event,
                                   struct perf_session *session)
{
        struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
        size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
        struct intel_pt *pt;
        int err;

        if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
                                        min_sz)
                return -EINVAL;

        pt = zalloc(sizeof(struct intel_pt));
        if (!pt)
                return -ENOMEM;

        perf_config(intel_pt_perf_config, pt);

        err = auxtrace_queues__init(&pt->queues);
        if (err)
                goto err_free;

        intel_pt_log_set_name(INTEL_PT_PMU_NAME);

        pt->session = session;
        pt->machine = &session->machines.host; /* No kvm support */
        pt->auxtrace_type = auxtrace_info->type;
        pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
        pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
        pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
        pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
        pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
        pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
        pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
        pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
        pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
        pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
        intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
                            INTEL_PT_PER_CPU_MMAPS);

        if (auxtrace_info->header.size >= sizeof(struct auxtrace_info_event) +
                                        (sizeof(u64) * INTEL_PT_CYC_BIT)) {
                pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
                pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
                pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
                pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
                pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
                intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
                                    INTEL_PT_CYC_BIT);
        }

        pt->timeless_decoding = intel_pt_timeless_decoding(pt);
        pt->have_tsc = intel_pt_have_tsc(pt);
        pt->sampling_mode = false;
        pt->est_tsc = !pt->timeless_decoding;

        pt->unknown_thread = thread__new(999999999, 999999999);
        if (!pt->unknown_thread) {
                err = -ENOMEM;
                goto err_free_queues;
        }

        /*
         * Since this thread will not be kept in any rbtree nor in a
         * list, initialize its list node so that at thread__put() the
         * current thread lifetime assumption is kept and we don't segfault
         * at list_del_init().
         */
        INIT_LIST_HEAD(&pt->unknown_thread->node);

        err = thread__set_comm(pt->unknown_thread, "unknown", 0);
        if (err)
                goto err_delete_thread;
        if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
                err = -ENOMEM;
                goto err_delete_thread;
        }

        pt->auxtrace.process_event = intel_pt_process_event;
        pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
        pt->auxtrace.flush_events = intel_pt_flush;
        pt->auxtrace.free_events = intel_pt_free_events;
        pt->auxtrace.free = intel_pt_free;
        session->auxtrace = &pt->auxtrace;

        if (dump_trace)
                return 0;

        if (pt->have_sched_switch == 1) {
                pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
                if (!pt->switch_evsel) {
                        pr_err("%s: missing sched_switch event\n", __func__);
                        err = -EINVAL;
                        goto err_delete_thread;
                }
        } else if (pt->have_sched_switch == 2 &&
                   !intel_pt_find_switch(session->evlist)) {
                pr_err("%s: missing context_switch attribute flag\n", __func__);
                err = -EINVAL;
                goto err_delete_thread;
        }

        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                pt->synth_opts = *session->itrace_synth_opts;
        } else {
                itrace_synth_opts__set_default(&pt->synth_opts);
                if (use_browser != -1) {
                        pt->synth_opts.branches = false;
                        pt->synth_opts.callchain = true;
                }
        }

        if (pt->synth_opts.log)
                intel_pt_log_enable();

        /* Maximum non-turbo ratio is TSC freq / 100 MHz */
        if (pt->tc.time_mult) {
                u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

                pt->max_non_turbo_ratio = (tsc_freq + 50000000) / 100000000;
                intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
                intel_pt_log("Maximum non-turbo ratio %u\n",
                             pt->max_non_turbo_ratio);
        }

        if (pt->synth_opts.calls)
                pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
                                       PERF_IP_FLAG_TRACE_END;
        if (pt->synth_opts.returns)
                pt->branches_filter |= PERF_IP_FLAG_RETURN |
                                       PERF_IP_FLAG_TRACE_BEGIN;

        if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
                symbol_conf.use_callchain = true;
                if (callchain_register_param(&callchain_param) < 0) {
                        symbol_conf.use_callchain = false;
                        pt->synth_opts.callchain = false;
                }
        }

        err = intel_pt_synth_events(pt, session);
        if (err)
                goto err_delete_thread;

        err = auxtrace_queues__process_index(&pt->queues, session);
        if (err)
                goto err_delete_thread;

        if (pt->queues.populated)
                pt->data_queued = true;

        if (pt->timeless_decoding)
                pr_debug2("Intel PT decoding without timestamps\n");

        return 0;

err_delete_thread:
        thread__zput(pt->unknown_thread);
err_free_queues:
        intel_pt_log_disable();
        auxtrace_queues__free(&pt->queues);
        session->auxtrace = NULL;
err_free:
        free(pt);
        return err;
}