#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>
static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd = STDIN_FILENO;

                if (perf_session__read_header(self, self->fd) < 0)
                        pr_err("incompatible file format (rerun with -v to learn more)");

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                int err = errno;

                pr_err("failed to open %s: %s", self->filename, strerror(err));
                if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err(" (try 'perf record' first)");
                return -errno;
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_session__read_header(self, self->fd) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)");
                goto out_close;
        }

        if (!perf_evlist__valid_sample_type(self->evlist)) {
                pr_err("non matching sample_type");
                goto out_close;
        }

        if (!perf_evlist__valid_sample_id_all(self->evlist)) {
                pr_err("non matching sample_id_all");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}
void perf_session__update_sample_type(struct perf_session *self)
{
        self->sample_type = perf_evlist__sample_type(self->evlist);
        self->sample_size = __perf_evsel__sample_size(self->sample_type);
        self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
        self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
        self->host_machine.id_hdr_size = self->id_hdr_size;
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        int ret = machine__create_kernel_maps(&self->host_machine);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
        machine__destroy_kernel_maps(&self->host_machine);
        machines__destroy_guest_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_tool *tool)
{
        struct perf_session *self;
        struct stat st;
        size_t len;

        if (!filename || !strlen(filename)) {
                if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
                        filename = "-";
                else
                        filename = "perf.data";
        }

        len = strlen(filename);
        self = zalloc(sizeof(*self) + len);
        if (self == NULL)
                goto out;

        memcpy(self->filename, filename, len);
        /*
         * On 64bit we can mmap the data file in one go. No need for tiny mmap
         * slices. On 32bit we use 32MB.
         */
#if BITS_PER_LONG == 64
        self->mmap_window = ULLONG_MAX;
#else
        self->mmap_window = 32 * 1024 * 1024ULL;
#endif
        self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);
        hists__init(&self->hists);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
                perf_session__update_sample_type(self);
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        if (tool && tool->ordering_requires_timestamps &&
            tool->ordered_samples && !self->sample_id_all) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_samples = false;
        }

out:
        return self;
out_delete:
        perf_session__delete(self);
        return NULL;
}
static void machine__delete_dead_threads(struct machine *machine)
{
        struct thread *n, *t;

        list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
                list_del(&t->node);
                thread__delete(t);
        }
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
        machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
        struct rb_node *nd = rb_first(&self->threads);

        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                rb_erase(&t->rb_node, &self->threads);
                nd = rb_next(nd);
                thread__delete(t);
        }
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
        machine__exit(&self->host_machine);
        close(self->fd);
        free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
        self->last_match = NULL;
        rb_erase(&th->rb_node, &self->threads);
        /*
         * We may have references to this thread, for instance in some
         * hist_entry instances, so just move them to a separate list.
         */
        list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return true;

        return false;
}

static const u8 cpumodes[] = {
        PERF_RECORD_MISC_USER,
        PERF_RECORD_MISC_KERNEL,
        PERF_RECORD_MISC_GUEST_USER,
        PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *self, struct thread *thread,
                            struct addr_map_symbol *ams,
                            u64 ip)
{
        struct addr_location al;
        size_t i;
        u8 m;

        memset(&al, 0, sizeof(al));

        for (i = 0; i < NCPUMODES; i++) {
                m = cpumodes[i];
                /*
                 * We cannot use the header.misc hint to determine whether a
                 * branch stack address is user, kernel, guest, or hypervisor.
                 * Branches may straddle the kernel/user/hypervisor boundaries.
                 * Thus, we have to try consecutively until we find a match
                 * or else the symbol is unknown.
                 */
                thread__find_addr_location(thread, self, m, MAP__FUNCTION,
                                           ip, &al, NULL);
                if (al.sym)
                        goto found;
        }
found:
        ams->addr = ip;
        ams->al_addr = al.addr;
        ams->sym = al.sym;
        ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
                                            struct thread *thr,
                                            struct branch_stack *bs)
{
        struct branch_info *bi;
        unsigned int i;

        bi = calloc(bs->nr, sizeof(struct branch_info));
        if (!bi)
                return NULL;

        for (i = 0; i < bs->nr; i++) {
                ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
                ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
                bi[i].flags = bs->entries[i].flags;
        }
        return bi;
}
int machine__resolve_callchain(struct machine *self,
                               struct perf_evsel *evsel __used,
                               struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        int err;

        callchain_cursor_reset(&callchain_cursor);

        if (chain->nr > PERF_MAX_STACK_DEPTH) {
                pr_warning("corrupted callchain. skipping...\n");
                return 0;
        }

        for (i = 0; i < chain->nr; i++) {
                u64 ip;
                struct addr_location al;

                if (callchain_param.order == ORDER_CALLEE)
                        ip = chain->ips[i];
                else
                        ip = chain->ips[chain->nr - i - 1];

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL; break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER; break;
                        default:
                                pr_debug("invalid callchain context: "
                                         "%"PRId64"\n", (s64) ip);
                                /*
                                 * It seems the callchain is corrupted.
                                 * Discard all.
                                 */
                                callchain_cursor_reset(&callchain_cursor);
                                return 0;
                        }
                        continue;
                }

                thread__find_addr_location(thread, self, cpumode,
                                           MAP__FUNCTION, ip, &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                }

                err = callchain_cursor_append(&callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
        }

        return 0;
}
static int process_event_synth_tracing_data_stub(union perf_event *event __used,
                                                 struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
                                         struct perf_evlist **pevlist __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
                                     union perf_event *event __used,
                                     struct perf_sample *sample __used,
                                     struct perf_evsel *evsel __used,
                                     struct machine *machine __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
                              union perf_event *event __used,
                              struct perf_sample *sample __used,
                              struct machine *machine __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
                                       union perf_event *event __used,
                                       struct perf_session *perf_session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
                                   union perf_event *event __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct perf_session *session);
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_type == NULL)
                tool->event_type = process_event_type_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_finished_round_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_samples)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
}
void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}
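
/*
 * A minimal usage sketch, not part of the original file: when a perf.data
 * file was recorded on a machine of the opposite endianness, any buffer of
 * u64 values read from it has to be swapped in place before use, which is
 * what mem_bswap_64() is for. The helper name and its arguments below are
 * hypothetical.
 */
#if 0
static void fixup_ids_sketch(u64 *ids, int nr_ids, bool needs_swap)
{
        if (needs_swap)
                mem_bswap_64(ids, nr_ids * sizeof(u64));
}
#endif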
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __used)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}
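
/*
 * A hypothetical self-test, not part of the original file, illustrating what
 * swap_bitfield() does: bit 0 of each byte becomes bit 7 and vice versa, so
 * a flag that a little-endian writer stored in the least significant bit is
 * moved to where a big-endian reader expects it. Applying it twice is a
 * no-op.
 */
#if 0
static void swap_bitfield_selftest(void)
{
        u8 buf[2] = { 0x01, 0xb4 };     /* 0b00000001, 0b10110100 */

        swap_bitfield(buf, sizeof(buf));
        BUG_ON(buf[0] != 0x80);         /* 0b10000000 */
        BUG_ON(buf[1] != 0x2d);         /* 0b00101101 */

        swap_bitfield(buf, sizeof(buf));
        BUG_ON(buf[0] != 0x01 || buf[1] != 0xb4);       /* involution */
}
#endif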
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type          = bswap_32(attr->type);
        attr->size          = bswap_32(attr->size);
        attr->config        = bswap_64(attr->config);
        attr->sample_period = bswap_64(attr->sample_period);
        attr->sample_type   = bswap_64(attr->sample_type);
        attr->read_format   = bswap_64(attr->read_format);
        attr->wakeup_events = bswap_32(attr->wakeup_events);
        attr->bp_type       = bswap_32(attr->bp_type);
        attr->bp_addr       = bswap_64(attr->bp_addr);
        attr->bp_len        = bswap_64(attr->bp_len);

        swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __used)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __used)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __used)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};
struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_samples *os = &session->ordered_samples;

        while (!list_empty(&os->to_free)) {
                struct sample_queue *sq;

                sq = list_entry(os->to_free.next, struct sample_queue, list);
                list_del(&sq->list);
                free(sq);
        }
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset);
static void flush_sample_queue(struct perf_session *s,
                               struct perf_tool *tool)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
        struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
        unsigned idx = 0, progress_next = os->nr_samples / 16;
        int ret;

        if (!tool->ordered_samples || !limit)
                return;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        break;

                ret = perf_session__parse_sample(s, iter->event, &sample);
                if (ret)
                        pr_err("Can't parse sample, err = %d\n", ret);
                else
                        perf_session_deliver_event(s, iter->event, &sample, tool,
                                                   iter->file_offset);

                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
                if (++idx >= progress_next) {
                        progress_next += os->nr_samples / 16;
                        ui_progress__update(idx, os->nr_samples,
                                            "Processing time ordered events...");
                }
        }

        if (list_empty(head)) {
                os->last_sample = NULL;
        } else if (last_ts <= limit) {
                os->last_sample =
                        list_entry(head->prev, struct sample_queue, list);
        }

        os->nr_samples = 0;
}
/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event __used,
                                  struct perf_session *session)
{
        flush_sample_queue(session, tool);
        session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

        return 0;
}
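
/*
 * Sketch of the producer side, not part of this file: perf record writes a
 * bare header of type PERF_RECORD_FINISHED_ROUND after each pass over its
 * mmap buffers, and that pseudo event is what drives the flushing above.
 * This is a hypothetical condensation of the builtin-record.c logic.
 */
#if 0
static struct perf_event_header finished_round_event = {
        .size = sizeof(struct perf_event_header),
        .type = PERF_RECORD_FINISHED_ROUND,
};
#endif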
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct sample_queue *sample = os->last_sample;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++os->nr_samples;
        os->last_sample = new;

        if (!sample) {
                list_add(&new->list, &os->samples);
                os->max_timestamp = timestamp;
                return;
        }

        /*
         * last_sample might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (sample->timestamp <= timestamp) {
                while (sample->timestamp <= timestamp) {
                        p = sample->list.next;
                        if (p == &os->samples) {
                                list_add_tail(&new->list, &os->samples);
                                os->max_timestamp = timestamp;
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add_tail(&new->list, &sample->list);
        } else {
                while (sample->timestamp > timestamp) {
                        p = sample->list.prev;
                        if (p == &os->samples) {
                                list_add(&new->list, &os->samples);
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add(&new->list, &sample->list);
        }
}
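
/*
 * A hypothetical debug helper, not part of the original file, spelling out
 * the invariant __queue_event() maintains: os->samples stays sorted by
 * timestamp no matter where the bidirectional insertion search started from.
 */
#if 0
static void assert_queue_sorted(struct ordered_samples *os)
{
        struct sample_queue *sq;
        u64 prev = 0;

        list_for_each_entry(sq, &os->samples, list) {
                BUG_ON(sq->timestamp < prev);   /* must be non-decreasing */
                prev = sq->timestamp;
        }
}
#endif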
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
        u64 timestamp = sample->time;
        struct sample_queue *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        if (!list_empty(sc)) {
                new = list_entry(sc->next, struct sample_queue, list);
                list_del(&new->list);
        } else if (os->sample_buffer) {
                new = os->sample_buffer + os->sample_buffer_idx;
                if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
                        os->sample_buffer = NULL;
        } else {
                os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!os->sample_buffer)
                        return -ENOMEM;
                list_add(&os->sample_buffer->list, &os->to_free);
                os->sample_buffer_idx = 2;
                new = os->sample_buffer + 1;
        }

        new->timestamp = timestamp;
        new->file_offset = file_offset;
        new->event = event;

        __queue_event(new, s);

        return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++)
                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
                       i, sample->branch_stack->entries[i].from,
                       sample->branch_stack->entries[i].to);
}
static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE &&
            !session->sample_id_all) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((session->sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (session->sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
                        struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);

        if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
                branch_stack__printf(sample);
}
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
                                       union perf_event *event)
{
        const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP)
                        pid = event->mmap.pid;
                else
                        pid = event->ip.pid;

                return perf_session__find_machine(session, pid);
        }

        return perf_session__find_host_machine(session);
}
static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(session, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(session->evlist, sample->id);
        if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
                /*
                 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
                 * because the tools right now may apply filters, discarding
                 * some of the samples. For consistency, in the future we
                 * should have something like nr_filtered_samples and remove
                 * the sample->period from total_sample_period, etc. KISS for
                 * now though.
                 *
                 * Also testing against NULL allows us to handle files without
                 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
                 * future it'll probably be a good idea to restrict event
                 * processing via perf_session to files with both set.
                 */
                hists__inc_nr_events(&evsel->hists, event->header.type);
        }

        machine = perf_session__find_machine_for_cpumode(session, event);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(session, event, sample);
                if (evsel == NULL) {
                        ++session->hists.stats.nr_unknown_id;
                        return 0;
                }
                if (machine == NULL) {
                        ++session->hists.stats.nr_unprocessable_samples;
                        return 0;
                }
                return tool->sample(tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        session->hists.stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        default:
                ++session->hists.stats.nr_unknown_events;
                return -1;
        }
}
static int perf_session__preprocess_sample(struct perf_session *session,
                                           union perf_event *event, struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
                return 0;

        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
                ++session->hists.stats.nr_invalid_chains;
                session->hists.stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
                                            struct perf_tool *tool, u64 file_offset)
{
        int err;

        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(event, &session->evlist);
                if (err == 0)
                        perf_session__update_sample_type(session);
                return err;
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return tool->event_type(tool, event);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(session->fd, file_offset, SEEK_SET);
                return tool->tracing_data(event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(tool, event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, session);
        default:
                return -EINVAL;
        }
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, session->sample_id_all);

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        hists__inc_nr_events(&session->hists, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, tool, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        ret = perf_session__parse_sample(session, event, &sample);
        if (ret)
                return ret;

        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
                return 0;

        if (tool->ordered_samples) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, tool,
                                          file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->host_machine, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_tool *tool)
{
        if (tool->lost == perf_event__process_lost &&
            session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
                            session->hists.stats.nr_events[0],
                            session->hists.stats.nr_events[PERF_RECORD_LOST]);
        }

        if (session->hists.stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_unknown_events);
        }

        if (session->hists.stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            session->hists.stats.nr_unknown_id);
        }

        if (session->hists.stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_invalid_chains,
                            session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
        }

        if (session->hists.stats.nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            session->hists.stats.nr_unprocessable_samples);
        }
}
#define session_done()  (*(volatile int *)(&session_done))
volatile int session_done;
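
/*
 * Sketch, not part of this file: session_done is meant to be set from a
 * signal handler so the pipe-processing loop below can stop cleanly; the
 * volatile read in the macro keeps the compiler from caching the flag in a
 * register. The handler below is hypothetical.
 */
#if 0
static void sig_handler_sketch(int sig __used)
{
        session_done = 1;       /* seen by session_done() on the next pass */
}
/* in tool setup: signal(SIGINT, sig_handler_sketch); */
#endif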
static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_tool *tool)
{
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_tool__fill_defaults(tool);

        head = 0;
        cur_size = sizeof(union perf_event);

        buf = malloc(cur_size);
        if (!buf)
                return -errno;
more:
        event = buf;
        err = readn(self->fd, event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event->header);

        size = event->header.size;
        if (size == 0)
                size = 8;

        if (size > cur_size) {
                void *new = realloc(buf, size);
                if (!new) {
                        pr_err("failed to allocate memory to read event\n");
                        goto out_err;
                }
                buf = new;
                cur_size = size;
                event = buf;
        }
        p = event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(self->fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       head, event->header.size, event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        free(buf);
        perf_session__warn_about_errors(self, tool);
        perf_session_free_sample_buffers(self);
        return err;
}
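
/*
 * Sketch, not part of this file, of the readn() helper the pipe loop relies
 * on: unlike a bare read(), it retries until the requested n bytes arrived,
 * returning 0 on EOF and a negative value on error, so short reads on the
 * pipe never surface here. Hypothetical condensation of the perf util
 * helper.
 */
#if 0
static ssize_t readn_sketch(int fd, void *buf, size_t n)
{
        size_t left = n;

        while (left) {
                ssize_t ret = read(fd, buf, left);

                if (ret <= 0)
                        return ret;     /* error or EOF */
                left -= ret;
                buf += ret;
        }

        return n;
}
#endif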
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
                   u64 head, size_t mmap_size, char *buf)
{
        union perf_event *event;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size)
                return NULL;

        return event;
}
int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_tool *tool)
{
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t page_size, mmap_size;
        char *buf, *mmaps[8];
        union perf_event *event;
        uint32_t size;

        perf_tool__fill_defaults(tool);

        page_size = sysconf(_SC_PAGESIZE);

        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        progress_next = file_size / 16;

        mmap_size = session->mmap_window;
        if (mmap_size > file_size)
                mmap_size = file_size;

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (session->header.needs_swap) {
                mmap_prot |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;

more:
        event = fetch_mmaped_event(session, head, mmap_size, buf);
        if (!event) {
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size == 0 ||
            perf_session__process_event(session, event, tool, file_pos) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       file_offset + head, event->header.size,
                       event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;
        file_pos += size;

        if (file_pos >= progress_next) {
                progress_next += file_size / 16;
                ui_progress__update(file_pos, file_size,
                                    "Processing events...");
        }

        if (file_pos < file_size)
                goto more;

        err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        flush_sample_queue(session, tool);
out_err:
        perf_session__warn_about_errors(session, tool);
        perf_session_free_sample_buffers(session);
        return err;
}
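
/*
 * Worked example, not part of this file, of the remap arithmetic above,
 * assuming a 4096-byte page size: if the next event starts at
 * head = 33554700 and no longer fits in the current window, then
 *
 *      page_offset = 4096 * (33554700 / 4096) = 33554432
 *      file_offset += 33554432
 *      head         = 33554700 - 33554432    = 268
 *
 * so the new window is mmapped at a page-aligned file offset and the event
 * that straddled the old window end is re-read 268 bytes into the new map.
 */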
int perf_session__process_events(struct perf_session *self,
                                 struct perf_tool *tool)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, tool);
        else
                err = __perf_session__process_pipe_events(self, tool);

        return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
                                     const char *symbol_name, u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
        return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
               __dsos__fprintf(&self->host_machine.user_dsos, fp) +
               machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
                                          bool with_hits)
{
        size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
        return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");

        ret += hists__fprintf_nr_events(&session->hists, fp);

        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", event_name(pos));
                ret += hists__fprintf_nr_events(&pos->hists, fp);
        }

        return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
        /*
         * FIXME: Here we have to actually print all the machines in this
         * session, not just the host...
         */
        return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
                                 struct thread *th)
{
        /*
         * FIXME: This one makes no sense, we need to remove the thread from
         * the machine it belongs to, perf_session can have many machines, so
         * doing it always on ->host_machine is wrong. Fix when auditing all
         * the 'perf kvm' code.
         */
        machine__remove_thread(&session->host_machine, th);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                                   unsigned int type)
{
        struct perf_evsel *pos;

        list_for_each_entry(pos, &session->evlist->entries, node) {
                if (pos->attr.type == type)
                        return pos;
        }
        return NULL;
}
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                          struct machine *machine, struct perf_evsel *evsel,
                          int print_sym, int print_dso, int print_symoffset)
{
        struct addr_location al;
        struct callchain_cursor_node *node;

        if (perf_event__preprocess_sample(event, machine, &al, sample,
                                          NULL) < 0) {
                error("problem processing %d event, skipping it.\n",
                      event->header.type);
                return;
        }

        if (symbol_conf.use_callchain && sample->callchain) {

                if (machine__resolve_callchain(machine, evsel, al.thread,
                                               sample->callchain, NULL) != 0) {
                        error("Failed to resolve callchain. Skipping\n");
                        return;
                }
                callchain_cursor_commit(&callchain_cursor);

                while (1) {
                        node = callchain_cursor_current(&callchain_cursor);
                        if (!node)
                                break;

                        printf("\t%16" PRIx64, node->ip);
                        if (print_sym) {
                                printf(" ");
                                symbol__fprintf_symname(node->sym, stdout);
                        }
                        if (print_dso) {
                                printf(" (");
                                map__fprintf_dsoname(node->map, stdout);
                                printf(")");
                        }
                        printf("\n");

                        callchain_cursor_advance(&callchain_cursor);
                }

        } else {
                printf("%16" PRIx64, sample->ip);
                if (print_sym) {
                        printf(" ");
                        if (print_symoffset)
                                symbol__fprintf_symname_offs(al.sym, &al,
                                                             stdout);
                        else
                                symbol__fprintf_symname(al.sym, stdout);
                }

                if (print_dso) {
                        printf(" (");
                        map__fprintf_dsoname(al.map, stdout);
                        printf(")");
                }
        }
}
int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap)
{
        int i;
        struct cpu_map *map;

        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct perf_evsel *evsel;

                evsel = perf_session__find_first_evtype(session, i);
                if (!evsel)
                        continue;

                if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
                        pr_err("File does not contain CPU events. "
                               "Remove -c option to proceed.\n");
                        return -1;
                }
        }

        map = cpu_map__new(cpu_list);
        if (map == NULL) {
                pr_err("Invalid cpu_list\n");
                return -1;
        }

        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];

                if (cpu >= MAX_NR_CPUS) {
                        pr_err("Requested CPU %d too large. "
                               "Consider raising MAX_NR_CPUS\n", cpu);
                        return -1;
                }

                set_bit(cpu, cpu_bitmap);
        }

        return 0;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
                                bool full)
{
        struct stat st;
        int ret;

        if (session == NULL || fp == NULL)
                return;

        ret = fstat(session->fd, &st);
        if (ret == -1)
                return;

        fprintf(fp, "# ========\n");
        fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
}