1 #include <linux/types.h>
/*
 * event__synthesize_comm - synthesize a PERF_RECORD_COMM event for @pid by
 * parsing /proc/<pid>/status, deliver it via @process, and walk
 * /proc/<pid>/task emitting one event per thread.
 *
 * NOTE(review): fragmentary excerpt — declarations (fp, ev, bf, size, name,
 * tgids, tgid, tasks, end), braces, error paths and returns fall on lines
 * not shown in this view.
 */
static pid_t event__synthesize_comm(pid_t pid, int full, event__handler_t process,
				    struct perf_session *session)
	char filename[PATH_MAX];
	struct dirent dirent, *next;

	/* Read the task name and tgid from /proc/<pid>/status. */
	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
	fp = fopen(filename, "r");
	/*
	 * We raced with a task exiting - just return:
	 */
		pr_debug("couldn't open %s\n", filename);
	memset(&ev.comm, 0, sizeof(ev.comm));
	/* Keep reading lines until both the comm string and the pid are set. */
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
		if (memcmp(bf, "Name:", 5) == 0) {
			/* Skip whitespace after the "Name:" tag. */
			while (*name && isspace(*name))
			size = strlen(name) - 1;	/* drop trailing '\n' */
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			while (*tgids && isspace(*tgids))
			tgid = ev.comm.pid = atoi(tgids);
	ev.comm.header.type = PERF_RECORD_COMM;
	/* Event size is trimmed so only the used part of comm[] is emitted. */
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
	process(&ev, session);
	/* Walk /proc/<pid>/task and emit one COMM event per thread. */
	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
	tasks = opendir(filename);
	/* NOTE(review): readdir_r() is deprecated in modern glibc. */
	while (!readdir_r(tasks, &dirent, &next) && next) {
		pid = strtol(dirent.d_name, &end, 10);
		process(&ev, session);
	pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
92 static int event__synthesize_mmap_events(pid_t pid
, pid_t tgid
,
93 event__handler_t process
,
94 struct perf_session
*session
)
96 char filename
[PATH_MAX
];
99 snprintf(filename
, sizeof(filename
), "/proc/%d/maps", pid
);
101 fp
= fopen(filename
, "r");
104 * We raced with a task exiting - just return:
106 pr_debug("couldn't open %s\n", filename
);
111 char bf
[BUFSIZ
], *pbf
= bf
;
114 .type
= PERF_RECORD_MMAP
,
116 * Just like the kernel, see __perf_event_mmap
117 * in kernel/perf_event.c
119 .misc
= PERF_RECORD_MISC_USER
,
124 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
127 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
128 n
= hex2u64(pbf
, &ev
.mmap
.start
);
132 n
= hex2u64(pbf
, &ev
.mmap
.len
);
136 if (*pbf
== 'x') { /* vm_exec */
138 char *execname
= strchr(bf
, '/');
141 if (execname
== NULL
)
142 execname
= strstr(bf
, "[vdso]");
144 if (execname
== NULL
)
148 n
= hex2u64(pbf
, &vm_pgoff
);
149 /* pgoff is in bytes, not pages */
151 ev
.mmap
.pgoff
= vm_pgoff
<< getpagesize();
155 size
= strlen(execname
);
156 execname
[size
- 1] = '\0'; /* Remove \n */
157 memcpy(ev
.mmap
.filename
, execname
, size
);
158 size
= ALIGN(size
, sizeof(u64
));
159 ev
.mmap
.len
-= ev
.mmap
.start
;
160 ev
.mmap
.header
.size
= (sizeof(ev
.mmap
) -
161 (sizeof(ev
.mmap
.filename
) - size
));
165 process(&ev
, session
);
/*
 * event__synthesize_modules - walk the machine's MAP__FUNCTION kernel maps
 * and emit one PERF_RECORD_MMAP event per module (non-kernel DSO) via
 * @process.
 *
 * NOTE(review): fragmentary excerpt — declarations (ev, nd, misc, size),
 * braces and the return value fall on lines not shown in this view.
 */
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
	struct map_groups *kmaps = &machine->kmaps;
	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
		misc = PERF_RECORD_MISC_GUEST_KERNEL;
	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		/* The kernel map proper is synthesized elsewhere; skip it. */
		if (pos->dso->kernel)
		/* Trim the event to the used part of filename[]. */
		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				       (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len = pos->end - pos->start;
		ev.mmap.pid = machine->pid;
		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
/*
 * event__synthesize_thread - synthesize COMM events (full=1, i.e. including
 * all threads) and then MMAP events for @pid, feeding each to @process.
 *
 * NOTE(review): the tgid error check between the two calls falls on lines
 * elided from this excerpt.
 */
int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	return event__synthesize_mmap_events(pid, tgid, process, session);
/*
 * event__synthesize_threads - scan /proc for numeric (task) directories and
 * synthesize COMM+MMAP events for every live thread group.
 *
 * NOTE(review): fragmentary excerpt — the proc/end declarations and
 * closedir() fall on elided lines.  readdir_r() is deprecated in modern
 * glibc.
 */
void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
	struct dirent dirent, *next;
	proc = opendir("/proc");
	while (!readdir_r(proc, &dirent, &next) && next) {
		pid_t pid = strtol(dirent.d_name, &end, 10);
		if (*end) /* only interested in proper numerical dirents */
		event__synthesize_thread(pid, process, session);
/*
 * Closure passed to kallsyms__parse(): the symbol name to look for and,
 * presumably, a field where find_symbol_cb() records its start address
 * (the struct body is elided from this excerpt — see the args.start use
 * in event__synthesize_kernel_mmap()).
 */
struct process_symbol_args {
/*
 * find_symbol_cb - kallsyms__parse() callback: match the symbol named in
 * @arg (the success path/return falls on lines elided from this excerpt).
 */
static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
	struct process_symbol_args *args = arg;
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
/*
 * event__synthesize_kernel_mmap - synthesize the PERF_RECORD_MMAP event
 * that describes the kernel text mapping, locating @symbol_name (the ref
 * reloc symbol) via kallsyms on the host or guest.
 *
 * NOTE(review): fragmentary excerpt — declarations (ev, path, map, size),
 * braces and error returns fall on lines not shown in this view.
 */
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
	const char *filename, *mmap_name;
	char name_buff[PATH_MAX];
		.type = PERF_RECORD_MMAP,
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
			/*
			 * NOTE(review): unbounded sprintf() into path —
			 * snprintf() would be safer; confirm path's size on
			 * the elided declaration line.
			 */
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			       (sizeof(ev.mmap.filename) - size));
	/* pgoff carries the resolved symbol address found by the callback. */
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len = map->end - ev.mmap.start;
	ev.mmap.pid = machine->pid;

	return process(&ev, session);
/*
 * thread__comm_adjust - widen the global comm/thread column widths to fit
 * this thread's comm, unless fixed widths or a field separator are in use,
 * honoring symbol_conf.comm_list filtering.
 */
static void thread__comm_adjust(struct thread *self)
	char *comm = self->comm;
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		unsigned int slen = strlen(comm);
		if (slen > comms__col_width) {
			comms__col_width = slen;
			/*
			 * NOTE(review): +6 presumably leaves room for a
			 * ":<pid>" suffix — confirm against the printing code.
			 */
			threads__col_width = slen + 6;
/*
 * thread__set_comm_adjust - set the thread's comm and keep the display
 * column widths in sync (the error check on ret falls on elided lines).
 */
static int thread__set_comm_adjust(struct thread *self, const char *comm)
	int ret = thread__set_comm(self, comm);
	thread__comm_adjust(self);
/*
 * event__process_comm - handle an incoming PERF_RECORD_COMM: look up (or
 * create) the thread for the event's pid and record its new comm.
 */
int event__process_comm(event_t *self, struct perf_session *session)
	struct thread *thread = perf_session__findnew(session, self->comm.pid);
	dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);
	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
/*
 * event__process_lost - account a PERF_RECORD_LOST event into the session's
 * lost-event statistics.
 *
 * NOTE(review): "%Ld" is a glibc-ism; PRIu64 from <inttypes.h> would be the
 * portable spelling for a u64 argument.
 */
int event__process_lost(event_t *self, struct perf_session *session)
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->events_stats.lost += self->lost.lost;
/*
 * event_set_kernel_mmap_len - set the kernel MAP__FUNCTION map's
 * [start, end) range from a kernel MMAP event.
 */
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0UL;
/*
 * event__process_kernel_mmap - handle an MMAP event whose pid marks it as a
 * kernel-space mapping: either a module (a "/path" or "[name]" filename) or
 * the kernel text map itself (filename starts with the machine's mmap
 * prefix, e.g. "[kernel.kallsyms").
 *
 * NOTE(review): fragmentary excerpt — declarations (map, name, dot,
 * is_kernel_mmap), several call arguments, braces and returns fall on
 * lines not shown in this view.
 */
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
		kernel_type = DSO_TYPE_GUEST_KERNEL;
	/* Kernel maps carry the machine's kallsyms-style filename prefix. */
	is_kernel_mmap = memcmp(self->mmap.filename,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
		char short_module_name[1024];
		if (self->mmap.filename[0] == '/') {
			/* Derive "[modname]" from the path's basename. */
			name = strrchr(self->mmap.filename, '/');
			dot = strrchr(name, '.');
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
			strcpy(short_module_name, self->mmap.filename);
		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		name = strdup(short_module_name);
		map->dso->short_name = name;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
/*
 * event__process_mmap - handle an incoming PERF_RECORD_MMAP: kernel/guest
 * mappings are routed to event__process_kernel_mmap(), user mappings create
 * a map in the owning thread's map groups.
 *
 * NOTE(review): fragmentary excerpt — declarations (ret, map), braces and
 * returns fall on lines not shown in this view.
 */
int event__process_mmap(event_t *self, struct perf_session *session)
	struct machine *machine;
	struct thread *thread;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
	machine = perf_session__find_host_machine(session);
	thread = perf_session__findnew(session, self->mmap.pid);
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION, session->cwd, session->cwdlen);
	if (thread == NULL || map == NULL)
	thread__insert_map(thread, map);
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
/*
 * event__process_task - handle PERF_RECORD_FORK/PERF_RECORD_EXIT: look up
 * (or create) the child and parent threads and link them via thread__fork()
 * (the EXIT and clone early-outs are completed on elided lines).
 */
int event__process_task(event_t *self, struct perf_session *session)
	struct thread *thread = perf_session__findnew(session, self->fork.pid);
	struct thread *parent = perf_session__findnew(session, self->fork.ppid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);
	/*
	 * A thread clone will have the same PID for both parent and child.
	 */
	if (thread == parent)
	if (self->header.type == PERF_RECORD_EXIT)
	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
/*
 * thread__find_addr_map - resolve @addr for the given @cpumode/@type to a
 * map in the proper map_groups (host kernel, host user, or guest kernel),
 * filling @al.  Guest user space is not supported yet.
 *
 * NOTE(review): fragmentary excerpt — al->addr/al->level initialization,
 * early returns and braces fall on lines not shown in this view.
 */
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		/* Guest kernel: the machine is looked up by guest pid. */
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
		mg = &machine->kmaps;
	/*
	 * 'u' means guest os user space.
	 * TODO: We don't support guest user space. Might support late.
	 */
	if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
	if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
	     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
	if ((cpumode == PERF_RECORD_MISC_USER ||
	     cpumode == PERF_RECORD_MISC_KERNEL) &&
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
	/* Translate the resolved address into map-relative form. */
	al->addr = al->map->map_ip(al->map, al->addr);
/*
 * thread__find_addr_location - like thread__find_addr_map(), but also
 * resolve the address to a symbol (subject to @filter).
 *
 * NOTE(review): the al->map NULL check between the two calls falls on lines
 * elided from this excerpt.
 */
void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	al->sym = map__find_symbol(al->map, al->addr, filter);
/*
 * dso__calc_col_width - widen the global DSO column width to fit this DSO's
 * name (short vs long name selection partly on elided lines), honoring
 * symbol_conf.dso_list filtering; marks the width as calculated so this
 * runs once per DSO.
 */
static void dso__calc_col_width(struct dso *self)
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = self->short_name_len;
		slen = self->long_name_len;
		if (dsos__col_width < slen)
			dsos__col_width = slen;
	self->slen_calculated = 1;
/*
 * event__preprocess_sample - resolve a sample's ip to thread, map and
 * symbol in @al, applying the comm/dso/sym list filters from symbol_conf,
 * and keep the display column-width bookkeeping up to date.
 *
 * NOTE(review): fragmentary excerpt — NULL checks, al->filtered updates,
 * returns and several braces fall on lines not shown in this view.
 */
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	/* Comm filter: drop samples from uninteresting threads. */
	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	/* DSO filter: match against either the short or the long DSO name. */
	if (symbol_conf.dso_list &&
	    (!al->map || !al->map->dso ||
	     !(strlist__has_entry(symbol_conf.dso_list,
				  al->map->dso->short_name) ||
	       (al->map->dso->short_name != al->map->dso->long_name &&
		strlist__has_entry(symbol_conf.dso_list,
				   al->map->dso->long_name)))))
	/*
	 * We have to do this here as we may have a dso with no symbol
	 * hit that has a name longer than the ones with symbols
	 */
	if (!sort_dso.elide && !al->map->dso->slen_calculated)
		dso__calc_col_width(al->map->dso);

	al->sym = map__find_symbol(al->map, al->addr, filter);
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	/* Unresolved addresses print as hex; reserve column width for that. */
	if (dsos__col_width < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		dsos__col_width = unresolved_col_width;
	/* Symbol filter from symbol_conf.sym_list. */
	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
705 int event__parse_sample(event_t
*event
, u64 type
, struct sample_data
*data
)
707 u64
*array
= event
->sample
.array
;
709 if (type
& PERF_SAMPLE_IP
) {
710 data
->ip
= event
->ip
.ip
;
714 if (type
& PERF_SAMPLE_TID
) {
715 u32
*p
= (u32
*)array
;
721 if (type
& PERF_SAMPLE_TIME
) {
726 if (type
& PERF_SAMPLE_ADDR
) {
732 if (type
& PERF_SAMPLE_ID
) {
737 if (type
& PERF_SAMPLE_STREAM_ID
) {
738 data
->stream_id
= *array
;
742 if (type
& PERF_SAMPLE_CPU
) {
743 u32
*p
= (u32
*)array
;
748 if (type
& PERF_SAMPLE_PERIOD
) {
749 data
->period
= *array
;
753 if (type
& PERF_SAMPLE_READ
) {
754 pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
758 if (type
& PERF_SAMPLE_CALLCHAIN
) {
759 data
->callchain
= (struct ip_callchain
*)array
;
760 array
+= 1 + data
->callchain
->nr
;
763 if (type
& PERF_SAMPLE_RAW
) {
764 u32
*p
= (u32
*)array
;