1 #define _FILE_OFFSET_BITS 64
3 #include <linux/kernel.h>
/*
 * Open the session's input (perf.data or stdin) and validate it: header
 * compatibility, file ownership, and non-zero size.
 * NOTE(review): this extraction is lossy -- braces, error-path returns and
 * gotos are missing; compare against upstream tools/perf/util/session.c
 * before editing the logic.
 */
13 static int perf_session__open(struct perf_session
*self
, bool force
)
/* Holds fstat() results for the ownership and size checks below. */
15 struct stat input_stat
;
/* A filename of "-" means read the event stream from stdin. */
17 if (!strcmp(self
->filename
, "-")) {
19 self
->fd
= STDIN_FILENO
;
21 if (perf_header__read(self
, self
->fd
) < 0)
22 pr_err("incompatible file format");
/* Regular-file path: open read-only; hint at 'perf record' on failure. */
27 self
->fd
= open(self
->filename
, O_RDONLY
);
29 pr_err("failed to open file: %s", self
->filename
);
30 if (!strcmp(self
->filename
, "perf.data"))
31 pr_err(" (try 'perf record' first)");
36 if (fstat(self
->fd
, &input_stat
) < 0)
/* Refuse files owned by other (non-root) users unless force was given. */
39 if (!force
&& input_stat
.st_uid
&& (input_stat
.st_uid
!= geteuid())) {
40 pr_err("file %s not owned by current user or root\n",
45 if (!input_stat
.st_size
) {
46 pr_info("zero-sized file (%s), nothing to do!\n",
51 if (perf_header__read(self
, self
->fd
) < 0) {
52 pr_err("incompatible file format");
/* Cache the file size for the mmap-based event processing loop. */
56 self
->size
= input_stat
.st_size
;
65 void perf_session__update_sample_type(struct perf_session
*self
)
67 self
->sample_type
= perf_header__sample_type(&self
->header
);
/*
 * Allocate and initialize a perf_session; the filename is stored inline
 * after the struct (hence the "sizeof(*self) + len" allocation).
 * NOTE(review): lossy extraction -- error-exit labels and returns are
 * missing from this view; do not infer the cleanup order from here.
 */
70 struct perf_session
*perf_session__new(const char *filename
, int mode
, bool force
)
/* +1 for the trailing NUL copied by memcpy() below; 0 if no filename. */
72 size_t len
= filename
? strlen(filename
) + 1 : 0;
73 struct perf_session
*self
= zalloc(sizeof(*self
) + len
);
78 if (perf_header__init(&self
->header
) < 0)
81 memcpy(self
->filename
, filename
, len
);
/* Empty lookup trees / default state for the new session. */
82 self
->threads
= RB_ROOT
;
83 self
->stats_by_id
= RB_ROOT
;
84 self
->last_match
= NULL
;
/* Number of pages mapped at a time by __perf_session__process_events(). */
85 self
->mmap_window
= 32;
88 self
->unknown_events
= 0;
89 map_groups__init(&self
->kmaps
);
/* Readers open+validate the file; writers set up kernel maps eagerly. */
91 if (mode
== O_RDONLY
) {
92 if (perf_session__open(self
, force
) < 0)
94 } else if (mode
== O_WRONLY
) {
96 * In O_RDONLY mode this will be performed when reading the
97 * kernel MMAP event, in event__process_mmap().
99 if (perf_session__create_kernel_maps(self
) < 0)
103 perf_session__update_sample_type(self
);
/* Presumably the failure path tears the session down again -- TODO confirm. */
110 perf_session__delete(self
);
/*
 * Tear down a session created by perf_session__new().
 * NOTE(review): only the header teardown is visible here; the extraction
 * appears to have dropped the remaining cleanup lines -- verify upstream.
 */
114 void perf_session__delete(struct perf_session
*self
)
116 perf_header__exit(&self
->header
);
/*
 * Test whether a symbol's name matches the user-supplied parent regex
 * (file-scope parent_regex), used by the --parent sorting logic.
 * NOTE(review): the return statements are missing from this view.
 */
122 static bool symbol__match_parent_regex(struct symbol
*sym
)
/* regexec() returns 0 on match, hence the negation. */
124 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
/*
 * Resolve each IP in a sample's callchain to a map+symbol pair.  Returns a
 * calloc()'d array of chain->nr entries (ownership passes to the caller).
 * Special PERF_CONTEXT_* marker values in the chain do not name code
 * addresses; they switch the cpumode used to resolve subsequent IPs.
 * NOTE(review): lossy extraction -- allocation-failure check, switch
 * braces, the *parent assignment and loop exits are missing here.
 */
130 struct map_symbol
*perf_session__resolve_callchain(struct perf_session
*self
,
131 struct thread
*thread
,
132 struct ip_callchain
*chain
,
133 struct symbol
**parent
)
/* Until told otherwise, assume user-space addresses. */
135 u8 cpumode
= PERF_RECORD_MISC_USER
;
137 struct map_symbol
*syms
= calloc(chain
->nr
, sizeof(*syms
));
142 for (i
= 0; i
< chain
->nr
; i
++) {
143 u64 ip
= chain
->ips
[i
];
144 struct addr_location al
;
/* Values >= PERF_CONTEXT_MAX are context markers, not addresses. */
146 if (ip
>= PERF_CONTEXT_MAX
) {
148 case PERF_CONTEXT_HV
:
149 cpumode
= PERF_RECORD_MISC_HYPERVISOR
; break;
150 case PERF_CONTEXT_KERNEL
:
151 cpumode
= PERF_RECORD_MISC_KERNEL
; break;
152 case PERF_CONTEXT_USER
:
153 cpumode
= PERF_RECORD_MISC_USER
; break;
160 thread__find_addr_location(thread
, self
, cpumode
,
161 MAP__FUNCTION
, ip
, &al
, NULL
);
162 if (al
.sym
!= NULL
) {
/* Record the first symbol matching the parent regex, once. */
163 if (sort__has_parent
&& !*parent
&&
164 symbol__match_parent_regex(al
.sym
))
166 if (!symbol_conf
.use_callchain
)
168 syms
[i
].map
= al
.map
;
169 syms
[i
].sym
= al
.sym
;
/*
 * Default no-op handler installed by perf_event_ops__fill_defaults() for
 * every callback a tool leaves NULL; just logs that the event is unhandled.
 * NOTE(review): the return statement is missing from this view.
 */
176 static int process_event_stub(event_t
*event __used
,
177 struct perf_session
*session __used
)
179 dump_printf(": unhandled!\n");
183 static void perf_event_ops__fill_defaults(struct perf_event_ops
*handler
)
185 if (handler
->sample
== NULL
)
186 handler
->sample
= process_event_stub
;
187 if (handler
->mmap
== NULL
)
188 handler
->mmap
= process_event_stub
;
189 if (handler
->comm
== NULL
)
190 handler
->comm
= process_event_stub
;
191 if (handler
->fork
== NULL
)
192 handler
->fork
= process_event_stub
;
193 if (handler
->exit
== NULL
)
194 handler
->exit
= process_event_stub
;
195 if (handler
->lost
== NULL
)
196 handler
->lost
= process_event_stub
;
197 if (handler
->read
== NULL
)
198 handler
->read
= process_event_stub
;
199 if (handler
->throttle
== NULL
)
200 handler
->throttle
= process_event_stub
;
201 if (handler
->unthrottle
== NULL
)
202 handler
->unthrottle
= process_event_stub
;
203 if (handler
->attr
== NULL
)
204 handler
->attr
= process_event_stub
;
205 if (handler
->event_type
== NULL
)
206 handler
->event_type
= process_event_stub
;
207 if (handler
->tracing_data
== NULL
)
208 handler
->tracing_data
= process_event_stub
;
209 if (handler
->build_id
== NULL
)
210 handler
->build_id
= process_event_stub
;
/*
 * Human-readable names for the event counters printed by
 * event__print_totals(); indexed by PERF_RECORD_* type.
 * NOTE(review): the extraction may have dropped entries/terminator lines
 * from this initializer -- verify against upstream before relying on it.
 */
213 static const char *event__name
[] = {
215 [PERF_RECORD_MMAP
] = "MMAP",
216 [PERF_RECORD_LOST
] = "LOST",
217 [PERF_RECORD_COMM
] = "COMM",
218 [PERF_RECORD_EXIT
] = "EXIT",
219 [PERF_RECORD_THROTTLE
] = "THROTTLE",
220 [PERF_RECORD_UNTHROTTLE
] = "UNTHROTTLE",
221 [PERF_RECORD_FORK
] = "FORK",
222 [PERF_RECORD_READ
] = "READ",
223 [PERF_RECORD_SAMPLE
] = "SAMPLE",
224 [PERF_RECORD_HEADER_ATTR
] = "ATTR",
225 [PERF_RECORD_HEADER_EVENT_TYPE
] = "EVENT_TYPE",
226 [PERF_RECORD_HEADER_TRACING_DATA
] = "TRACING_DATA",
227 [PERF_RECORD_HEADER_BUILD_ID
] = "BUILD_ID",
/* Per-type event counters, bumped in perf_session__process_event(). */
230 unsigned long event__total
[PERF_RECORD_HEADER_MAX
];
/*
 * Dump the per-type event counters accumulated in event__total[].
 * NOTE(review): loop-variable declaration and braces are missing from
 * this view (lossy extraction).
 */
232 void event__print_totals(void)
235 for (i
= 0; i
< PERF_RECORD_HEADER_MAX
; ++i
) {
238 pr_info("%10s events: %10ld\n",
239 event__name
[i
], event__total
[i
]);
/*
 * Byte-swap a buffer in-place as an array of u64 words; used when the
 * perf.data file was recorded with the opposite endianness.
 * NOTE(review): the pointer-walk statements are missing from this view;
 * only the loop condition and the counter decrement survived extraction.
 */
243 void mem_bswap_64(void *src
, int byte_size
)
247 while (byte_size
> 0) {
249 byte_size
-= sizeof(u64
);
254 static void event__all64_swap(event_t
*self
)
256 struct perf_event_header
*hdr
= &self
->header
;
257 mem_bswap_64(hdr
+ 1, self
->header
.size
- sizeof(*hdr
));
260 static void event__comm_swap(event_t
*self
)
262 self
->comm
.pid
= bswap_32(self
->comm
.pid
);
263 self
->comm
.tid
= bswap_32(self
->comm
.tid
);
266 static void event__mmap_swap(event_t
*self
)
268 self
->mmap
.pid
= bswap_32(self
->mmap
.pid
);
269 self
->mmap
.tid
= bswap_32(self
->mmap
.tid
);
270 self
->mmap
.start
= bswap_64(self
->mmap
.start
);
271 self
->mmap
.len
= bswap_64(self
->mmap
.len
);
272 self
->mmap
.pgoff
= bswap_64(self
->mmap
.pgoff
);
275 static void event__task_swap(event_t
*self
)
277 self
->fork
.pid
= bswap_32(self
->fork
.pid
);
278 self
->fork
.tid
= bswap_32(self
->fork
.tid
);
279 self
->fork
.ppid
= bswap_32(self
->fork
.ppid
);
280 self
->fork
.ptid
= bswap_32(self
->fork
.ptid
);
281 self
->fork
.time
= bswap_64(self
->fork
.time
);
284 static void event__read_swap(event_t
*self
)
286 self
->read
.pid
= bswap_32(self
->read
.pid
);
287 self
->read
.tid
= bswap_32(self
->read
.tid
);
288 self
->read
.value
= bswap_64(self
->read
.value
);
289 self
->read
.time_enabled
= bswap_64(self
->read
.time_enabled
);
290 self
->read
.time_running
= bswap_64(self
->read
.time_running
);
291 self
->read
.id
= bswap_64(self
->read
.id
);
294 static void event__attr_swap(event_t
*self
)
298 self
->attr
.attr
.type
= bswap_32(self
->attr
.attr
.type
);
299 self
->attr
.attr
.size
= bswap_32(self
->attr
.attr
.size
);
300 self
->attr
.attr
.config
= bswap_64(self
->attr
.attr
.config
);
301 self
->attr
.attr
.sample_period
= bswap_64(self
->attr
.attr
.sample_period
);
302 self
->attr
.attr
.sample_type
= bswap_64(self
->attr
.attr
.sample_type
);
303 self
->attr
.attr
.read_format
= bswap_64(self
->attr
.attr
.read_format
);
304 self
->attr
.attr
.wakeup_events
= bswap_32(self
->attr
.attr
.wakeup_events
);
305 self
->attr
.attr
.bp_type
= bswap_32(self
->attr
.attr
.bp_type
);
306 self
->attr
.attr
.bp_addr
= bswap_64(self
->attr
.attr
.bp_addr
);
307 self
->attr
.attr
.bp_len
= bswap_64(self
->attr
.attr
.bp_len
);
309 size
= self
->header
.size
;
310 size
-= (void *)&self
->attr
.id
- (void *)self
;
311 mem_bswap_64(self
->attr
.id
, size
);
314 static void event__event_type_swap(event_t
*self
)
316 self
->event_type
.event_type
.event_id
=
317 bswap_64(self
->event_type
.event_type
.event_id
);
320 static void event__tracing_data_swap(event_t
*self
)
322 self
->tracing_data
.size
= bswap_32(self
->tracing_data
.size
);
325 typedef void (*event__swap_op
)(event_t
*self
);
327 static event__swap_op event__swap_ops
[] = {
328 [PERF_RECORD_MMAP
] = event__mmap_swap
,
329 [PERF_RECORD_COMM
] = event__comm_swap
,
330 [PERF_RECORD_FORK
] = event__task_swap
,
331 [PERF_RECORD_EXIT
] = event__task_swap
,
332 [PERF_RECORD_LOST
] = event__all64_swap
,
333 [PERF_RECORD_READ
] = event__read_swap
,
334 [PERF_RECORD_SAMPLE
] = event__all64_swap
,
335 [PERF_RECORD_HEADER_ATTR
] = event__attr_swap
,
336 [PERF_RECORD_HEADER_EVENT_TYPE
] = event__event_type_swap
,
337 [PERF_RECORD_HEADER_TRACING_DATA
] = event__tracing_data_swap
,
338 [PERF_RECORD_HEADER_BUILD_ID
] = NULL
,
339 [PERF_RECORD_HEADER_MAX
] = NULL
,
/*
 * Dispatch one event record to the matching ops callback, after optional
 * endianness fix-up via event__swap_ops[] and counter bookkeeping.
 * NOTE(review): lossy extraction -- the event parameter declaration,
 * switch braces, default label and final return are missing here.
 */
342 static int perf_session__process_event(struct perf_session
*self
,
344 struct perf_event_ops
*ops
,
345 u64 offset
, u64 head
)
/* Known type: log it and bump the per-type total. */
349 if (event
->header
.type
< PERF_RECORD_HEADER_MAX
) {
350 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
351 offset
+ head
, event
->header
.size
,
352 event__name
[event
->header
.type
]);
354 ++event__total
[event
->header
.type
];
/* Cross-endian file: swap the record body before the handler sees it. */
357 if (self
->header
.needs_swap
&& event__swap_ops
[event
->header
.type
])
358 event__swap_ops
[event
->header
.type
](event
);
360 switch (event
->header
.type
) {
361 case PERF_RECORD_SAMPLE
:
362 return ops
->sample(event
, self
);
363 case PERF_RECORD_MMAP
:
364 return ops
->mmap(event
, self
);
365 case PERF_RECORD_COMM
:
366 return ops
->comm(event
, self
);
367 case PERF_RECORD_FORK
:
368 return ops
->fork(event
, self
);
369 case PERF_RECORD_EXIT
:
370 return ops
->exit(event
, self
);
371 case PERF_RECORD_LOST
:
372 return ops
->lost(event
, self
);
373 case PERF_RECORD_READ
:
374 return ops
->read(event
, self
);
375 case PERF_RECORD_THROTTLE
:
376 return ops
->throttle(event
, self
);
377 case PERF_RECORD_UNTHROTTLE
:
378 return ops
->unthrottle(event
, self
);
379 case PERF_RECORD_HEADER_ATTR
:
380 return ops
->attr(event
, self
);
381 case PERF_RECORD_HEADER_EVENT_TYPE
:
382 return ops
->event_type(event
, self
);
383 case PERF_RECORD_HEADER_TRACING_DATA
:
384 /* setup for reading amidst mmap */
385 lseek(self
->fd
, offset
+ head
, SEEK_SET
);
386 return ops
->tracing_data(event
, self
);
387 case PERF_RECORD_HEADER_BUILD_ID
:
388 return ops
->build_id(event
, self
);
/* Unknown record types are counted, not fatal. */
390 self
->unknown_events
++;
395 void perf_event_header__bswap(struct perf_event_header
*self
)
397 self
->type
= bswap_32(self
->type
);
398 self
->misc
= bswap_16(self
->misc
);
399 self
->size
= bswap_16(self
->size
);
/*
 * Walk the build-id section of the file [offset, offset+size) and register
 * each (filename, build-id) pair with the user or kernel DSO list.
 * NOTE(review): lossy extraction -- local declarations (dso, len), error
 * exits and loop braces are missing from this view.
 */
402 int perf_header__read_build_ids(struct perf_header
*self
,
403 int input
, u64 offset
, u64 size
)
405 struct build_id_event bev
;
406 char filename
[PATH_MAX
];
407 u64 limit
= offset
+ size
;
410 while (offset
< limit
) {
/* Default to the user-space DSO list; kernel DSOs override below. */
413 struct list_head
*head
= &dsos__user
;
415 if (read(input
, &bev
, sizeof(bev
)) != sizeof(bev
))
418 if (self
->needs_swap
)
419 perf_event_header__bswap(&bev
.header
);
/* The filename follows the fixed event; its length is the remainder. */
421 len
= bev
.header
.size
- sizeof(bev
);
422 if (read(input
, filename
, len
) != len
)
425 if (bev
.header
.misc
& PERF_RECORD_MISC_KERNEL
)
426 head
= &dsos__kernel
;
428 dso
= __dsos__findnew(head
, filename
);
430 dso__set_build_id(dso
, &bev
.build_id
);
/* '[' prefix, e.g. "[kernel.kallsyms]", marks the kernel pseudo-DSO. */
431 if (head
== &dsos__kernel
&& filename
[0] == '[')
435 offset
+= bev
.header
.size
;
/*
 * Create/find the pid-0 "swapper" (idle) thread entry so samples attributed
 * to pid 0 resolve to something sensible.
 * NOTE(review): return statements are missing from this view.
 */
442 static struct thread
*perf_session__register_idle_thread(struct perf_session
*self
)
444 struct thread
*thread
= perf_session__findnew(self
, 0);
446 if (thread
== NULL
|| thread__set_comm(thread
, "swapper")) {
447 pr_err("problem inserting idle task.\n");
/*
 * Read exactly `size` bytes from fd into buf (retrying short reads);
 * returns the number of bytes read, computed from the advanced pointer.
 * NOTE(review): the retry loop structure and error handling are missing
 * from this view (lossy extraction).
 */
454 int do_read(int fd
, void *buf
, size_t size
)
/* Remember the start so the byte count can be derived at the end. */
456 void *buf_start
= buf
;
459 int ret
= read(fd
, buf
, size
);
468 return buf
- buf_start
;
/* Force a volatile re-read of the done flag each time it is tested. */
471 #define session_done() (*(volatile int *)(&session_done))
/* Set asynchronously (e.g. by a signal handler) to stop event processing. */
472 volatile int session_done
;
/*
 * Event loop for the streaming (stdin/pipe) case: read one header, then
 * the body, then dispatch; cannot mmap or seek backwards.
 * NOTE(review): lossy extraction -- local declarations, loop structure,
 * error exits and the head-advance logic are missing from this view.
 */
474 static int __perf_session__process_pipe_events(struct perf_session
*self
,
475 struct perf_event_ops
*ops
)
484 perf_event_ops__fill_defaults(ops
);
488 err
= do_read(self
->fd
, &event
, sizeof(struct perf_event_header
));
493 pr_err("failed to read event header\n");
/* Swap the header first so header.size below can be trusted. */
497 if (self
->header
.needs_swap
)
498 perf_event_header__bswap(&event
.header
);
500 size
= event
.header
.size
;
/* Read the record body that follows the header just consumed. */
505 p
+= sizeof(struct perf_event_header
);
507 err
= do_read(self
->fd
, p
, size
- sizeof(struct perf_event_header
));
510 pr_err("unexpected end of event stream\n");
514 pr_err("failed to read event data\n");
519 (skip
= perf_session__process_event(self
, &event
, ops
,
521 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
522 head
, event
.header
.size
, event
.header
.type
);
524 * assume we lost track of the stream, check alignment, and
525 * increment a single u64 in the hope to catch on again 'soon'.
527 if (unlikely(head
& 7))
535 dump_printf("\n%#Lx [%#x]: event: %d\n",
536 head
, event
.header
.size
, event
.header
.type
);
/*
 * Event loop for the regular-file case: mmap a sliding window of
 * self->mmap_window pages over the data section and dispatch each record.
 * When the file needs byte-swapping, the window is mapped MAP_PRIVATE and
 * writable so records can be swapped in place without touching the file.
 * NOTE(review): lossy extraction -- local declarations, remap/goto logic,
 * labels and returns are missing; the visible lines show the shape only.
 */
549 int __perf_session__process_events(struct perf_session
*self
,
550 u64 data_offset
, u64 data_size
,
551 u64 file_size
, struct perf_event_ops
*ops
)
553 int err
, mmap_prot
, mmap_flags
;
560 struct ui_progress
*progress
= ui_progress__new("Processing events...",
562 if (progress
== NULL
)
565 perf_event_ops__fill_defaults(ops
);
567 page_size
= sysconf(_SC_PAGESIZE
);
/* Align the window start down to a page boundary. */
570 shift
= page_size
* (head
/ page_size
);
574 mmap_prot
= PROT_READ
;
575 mmap_flags
= MAP_SHARED
;
/* In-place byte-swapping needs a writable, private mapping. */
577 if (self
->header
.needs_swap
) {
578 mmap_prot
|= PROT_WRITE
;
579 mmap_flags
= MAP_PRIVATE
;
582 buf
= mmap(NULL
, page_size
* self
->mmap_window
, mmap_prot
,
583 mmap_flags
, self
->fd
, offset
);
584 if (buf
== MAP_FAILED
) {
585 pr_err("failed to mmap file\n");
591 event
= (event_t
*)(buf
+ head
);
592 ui_progress__update(progress
, offset
);
594 if (self
->header
.needs_swap
)
595 perf_event_header__bswap(&event
->header
);
596 size
= event
->header
.size
;
/* Record straddles the window end: unmap and remap further along. */
600 if (head
+ event
->header
.size
>= page_size
* self
->mmap_window
) {
603 shift
= page_size
* (head
/ page_size
);
605 munmap_ret
= munmap(buf
, page_size
* self
->mmap_window
);
606 assert(munmap_ret
== 0);
613 size
= event
->header
.size
;
615 dump_printf("\n%#Lx [%#x]: event: %d\n",
616 offset
+ head
, event
->header
.size
, event
->header
.type
);
619 perf_session__process_event(self
, event
, ops
, offset
, head
) < 0) {
620 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
621 offset
+ head
, event
->header
.size
,
624 * assume we lost track of the stream, check alignment, and
625 * increment a single u64 in the hope to catch on again 'soon'.
627 if (unlikely(head
& 7))
/* Stop at the end of the data section, or of the file itself. */
635 if (offset
+ head
>= data_offset
+ data_size
)
638 if (offset
+ head
< file_size
)
643 ui_progress__delete(progress
);
/*
 * Public entry point: register the idle thread, optionally capture the
 * current working directory (for relative path resolution), then run the
 * mmap-based or pipe-based processing loop.
 * NOTE(review): lossy extraction -- local declarations (bf), braces,
 * error exits, the pipe/file mode test and final return are missing.
 */
647 int perf_session__process_events(struct perf_session
*self
,
648 struct perf_event_ops
*ops
)
652 if (perf_session__register_idle_thread(self
) == NULL
)
/* Record cwd so DSO paths can be shown relative to it. */
655 if (!symbol_conf
.full_paths
) {
658 if (getcwd(bf
, sizeof(bf
)) == NULL
) {
661 pr_err("failed to get the current directory\n");
664 self
->cwd
= strdup(bf
);
665 if (self
->cwd
== NULL
) {
669 self
->cwdlen
= strlen(self
->cwd
);
673 err
= __perf_session__process_events(self
,
674 self
->header
.data_offset
,
675 self
->header
.data_size
,
678 err
= __perf_session__process_pipe_events(self
, ops
);
/*
 * Check that the session's samples carry raw tracepoint data
 * (PERF_SAMPLE_RAW); msg names the perf subcommand for the error hint.
 * NOTE(review): the return statements are missing from this view.
 */
683 bool perf_session__has_traces(struct perf_session
*self
, const char *msg
)
685 if (!(self
->sample_type
& PERF_SAMPLE_RAW
)) {
686 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg
);
693 int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session
*self
,
694 const char *symbol_name
,
700 self
->ref_reloc_sym
.name
= strdup(symbol_name
);
701 if (self
->ref_reloc_sym
.name
== NULL
)
704 bracket
= strchr(self
->ref_reloc_sym
.name
, ']');
708 self
->ref_reloc_sym
.addr
= addr
;
710 for (i
= 0; i
< MAP__NR_TYPES
; ++i
) {
711 struct kmap
*kmap
= map__kmap(self
->vmlinux_maps
[i
]);
712 kmap
->ref_reloc_sym
= &self
->ref_reloc_sym
;