4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
10 #include "util/util.h"
12 #include "util/color.h"
13 #include <linux/list.h>
14 #include "util/cache.h"
15 #include <linux/rbtree.h>
16 #include "util/symbol.h"
17 #include "util/string.h"
18 #include "util/callchain.h"
19 #include "util/strlist.h"
22 #include "util/header.h"
24 #include "util/parse-options.h"
25 #include "util/parse-events.h"
31 static char const *input_name
= "perf.data";
32 static char *vmlinux
= NULL
;
34 static char default_sort_order
[] = "comm,dso";
35 static char *sort_order
= default_sort_order
;
36 static char *dso_list_str
, *comm_list_str
, *sym_list_str
;
37 static struct strlist
*dso_list
, *comm_list
, *sym_list
;
40 static int show_mask
= SHOW_KERNEL
| SHOW_USER
| SHOW_HV
;
42 static int dump_trace
= 0;
43 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
44 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
47 #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
51 static int full_paths
;
53 static unsigned long page_size
;
54 static unsigned long mmap_window
= 32;
56 static char default_parent_pattern
[] = "^sys_|^do_page_fault";
57 static char *parent_pattern
= default_parent_pattern
;
58 static regex_t parent_regex
;
60 static int exclude_other
= 1;
62 static char callchain_default_opt
[] = "fractal,0.5";
67 struct callchain_param callchain_param
= {
68 .mode
= CHAIN_GRAPH_ABS
,
72 static u64 sample_type
;
75 struct perf_event_header header
;
78 unsigned char __more_data
[];
82 struct perf_event_header header
;
87 char filename
[PATH_MAX
];
91 struct perf_event_header header
;
97 struct perf_event_header header
;
101 struct period_event
{
102 struct perf_event_header header
;
109 struct perf_event_header header
;
115 struct perf_event_header header
;
121 typedef union event_union
{
122 struct perf_event_header header
;
124 struct mmap_event mmap
;
125 struct comm_event comm
;
126 struct fork_event fork
;
127 struct period_event period
;
128 struct lost_event lost
;
129 struct read_event read
;
132 static LIST_HEAD(dsos
);
133 static struct dso
*kernel_dso
;
134 static struct dso
*vdso
;
135 static struct dso
*hypervisor_dso
;
/* dsos__add - append @dso to the global 'dsos' list (fragment; braces elided in this extract). */
137 static void dsos__add(struct dso
*dso
)
139 list_add_tail(&dso
->node
, &dsos
);
/*
 * dsos__find - linear scan of the global 'dsos' list for a DSO whose
 * name matches @name exactly; presumably returns it, or NULL when absent
 * (return lines are missing from this extract - TODO confirm).
 */
142 static struct dso
*dsos__find(const char *name
)
146 list_for_each_entry(pos
, &dsos
, node
)
147 if (strcmp(pos
->name
, name
) == 0)
/*
 * dsos__findnew - look up @name in the DSO cache; on a miss, allocate a
 * new dso with dso__new() and try to load its symbols via dso__load().
 * eprintf() (stderr, verbose-gated) reports open/symbol-load problems.
 * Error-path cleanup lines are missing from this extract - TODO confirm
 * ownership on failure.
 */
152 static struct dso
*dsos__findnew(const char *name
)
154 struct dso
*dso
= dsos__find(name
);
160 dso
= dso__new(name
, 0);
164 nr
= dso__load(dso
, NULL
, verbose
);
166 eprintf("Failed to open: %s\n", name
);
170 eprintf("No symbols found in: %s, maybe install a debug package?\n", name
);
/* dsos__fprintf - dump every DSO on the global list to @fp via dso__fprintf(). */
181 static void dsos__fprintf(FILE *fp
)
185 list_for_each_entry(pos
, &dsos
, node
)
186 dso__fprintf(pos
, fp
);
/*
 * vdso__find_symbol - find_symbol hook installed on the [vdso] DSO;
 * simply delegates to the generic dso__find_symbol().
 */
189 static struct symbol
*vdso__find_symbol(struct dso
*dso
, u64 ip
)
191 return dso__find_symbol(dso
, ip
);
/*
 * load_kernel - create and register the synthetic [kernel], [vdso] and
 * [hypervisor] DSOs.  Kernel symbols come from dso__load_kernel() (using
 * the -k/--vmlinux path and -m/--modules flag); on load failure the
 * kernel dso is deleted.  The [vdso] dso gets the vdso__find_symbol hook.
 * Return-value lines are missing from this extract - TODO confirm.
 */
194 static int load_kernel(void)
198 kernel_dso
= dso__new("[kernel]", 0);
202 err
= dso__load_kernel(kernel_dso
, vmlinux
, NULL
, verbose
, modules
);
204 dso__delete(kernel_dso
);
207 dsos__add(kernel_dso
);
209 vdso
= dso__new("[vdso]", 0);
213 vdso
->find_symbol
= vdso__find_symbol
;
217 hypervisor_dso
= dso__new("[hypervisor]", 0);
220 dsos__add(hypervisor_dso
);
225 static char __cwd
[PATH_MAX
];
226 static char *cwd
= __cwd
;
/*
 * strcommon - count how many leading characters @pathname shares with the
 * cached current working directory (cwd/cwdlen); used to shorten mmap
 * pathnames unless --full-paths is given.
 */
229 static int strcommon(const char *pathname
)
233 while (pathname
[n
] == cwd
[n
] && n
< cwdlen
)
240 struct list_head node
;
244 u64 (*map_ip
)(struct map
*, u64
);
/*
 * map__map_ip - translate an absolute ip into a DSO-relative address:
 * subtract the map's load address and add its file page offset.
 */
248 static u64
map__map_ip(struct map
*map
, u64 ip
)
250 return ip
- map
->start
+ map
->pgoff
;
253 static u64
vdso__map_ip(struct map
*map __used
, u64 ip
)
/* is_anon_memory - true when the mmap filename is the kernel's "//anon" marker. */
258 static inline int is_anon_memory(const char *filename
)
260 return strcmp(filename
, "//anon") == 0;
/*
 * map__new - build a struct map from a PERF_EVENT_MMAP record.
 * Visible steps: strip the common cwd prefix from the filename (unless
 * full paths requested); anonymous mappings are redirected to the
 * per-pid JIT map file /tmp/perf-<pid>.map; start/end/pgoff are copied
 * from the event; the backing dso is resolved via dsos__findnew().
 * vdso and anonymous maps use the identity-style vdso__map_ip hook,
 * everything else uses map__map_ip.  Allocation-failure and NULL-dso
 * error paths are elided in this extract - TODO confirm cleanup.
 */
263 static struct map
*map__new(struct mmap_event
*event
)
265 struct map
*self
= malloc(sizeof(*self
));
268 const char *filename
= event
->filename
;
269 char newfilename
[PATH_MAX
];
273 int n
= strcommon(filename
);
276 snprintf(newfilename
, sizeof(newfilename
),
277 ".%s", filename
+ n
);
278 filename
= newfilename
;
282 anon
= is_anon_memory(filename
);
285 snprintf(newfilename
, sizeof(newfilename
), "/tmp/perf-%d.map", event
->pid
);
286 filename
= newfilename
;
289 self
->start
= event
->start
;
290 self
->end
= event
->start
+ event
->len
;
291 self
->pgoff
= event
->pgoff
;
293 self
->dso
= dsos__findnew(filename
);
294 if (self
->dso
== NULL
)
297 if (self
->dso
== vdso
|| anon
)
298 self
->map_ip
= vdso__map_ip
;
300 self
->map_ip
= map__map_ip
;
/*
 * map__clone - shallow copy of a map (memcpy of the whole struct);
 * used when a fork inherits the parent's mappings.  Note: the dso
 * pointer is shared, not duplicated.
 */
308 static struct map
*map__clone(struct map
*self
)
310 struct map
*map
= malloc(sizeof(*self
));
315 memcpy(map
, self
, sizeof(*self
));
/*
 * map__overlap - whether address ranges [l->start,l->end) and
 * [r->start,r->end) intersect; normalizes so the comparison runs with
 * l starting first (swap lines elided in this extract).
 */
320 static int map__overlap(struct map
*l
, struct map
*r
)
322 if (l
->start
> r
->start
) {
328 if (l
->end
> r
->start
)
/* map__fprintf - print one map as " start-end pgoff dso-name" to @fp. */
334 static size_t map__fprintf(struct map
*self
, FILE *fp
)
336 return fprintf(fp
, " %Lx-%Lx %Lx %s\n",
337 self
->start
, self
->end
, self
->pgoff
, self
->dso
->name
);
342 struct rb_node rb_node
;
343 struct list_head maps
;
/*
 * thread__new - allocate a thread and give it the placeholder comm
 * ":<pid>" (32-byte buffer) until a PERF_EVENT_COMM names it; the maps
 * list is initialized empty.  malloc-failure handling is elided in this
 * extract - TODO confirm.
 */
348 static struct thread
*thread__new(pid_t pid
)
350 struct thread
*self
= malloc(sizeof(*self
));
354 self
->comm
= malloc(32);
356 snprintf(self
->comm
, 32, ":%d", self
->pid
);
357 INIT_LIST_HEAD(&self
->maps
);
/*
 * thread__set_comm - replace the thread's command name with a strdup of
 * @comm; returns 0 on success, -ENOMEM when strdup fails.  NOTE(review):
 * the previous comm buffer appears not to be freed here - the free may
 * be on an elided line; confirm against the full file.
 */
363 static int thread__set_comm(struct thread
*self
, const char *comm
)
367 self
->comm
= strdup(comm
);
368 return self
->comm
? 0 : -ENOMEM
;
371 static size_t thread__fprintf(struct thread
*self
, FILE *fp
)
374 size_t ret
= fprintf(fp
, "Thread %d %s\n", self
->pid
, self
->comm
);
376 list_for_each_entry(pos
, &self
->maps
, node
)
377 ret
+= map__fprintf(pos
, fp
);
383 static struct rb_root threads
;
384 static struct thread
*last_match
;
/*
 * threads__findnew - find the thread for @pid in the rb-tree keyed by
 * pid, creating and inserting a new one on a miss.  A one-entry
 * 'last_match' cache short-circuits the common case of consecutive
 * samples from the same pid.
 */
386 static struct thread
*threads__findnew(pid_t pid
)
388 struct rb_node
**p
= &threads
.rb_node
;
389 struct rb_node
*parent
= NULL
;
393 * Front-end cache - PID lookups come in blocks,
394 * so most of the time we don't have to look up
397 if (last_match
&& last_match
->pid
== pid
)
402 th
= rb_entry(parent
, struct thread
, rb_node
);
404 if (th
->pid
== pid
) {
415 th
= thread__new(pid
);
417 rb_link_node(&th
->rb_node
, parent
, p
);
418 rb_insert_color(&th
->rb_node
, &threads
);
/*
 * thread__insert_map - add @map to the thread's map list, resolving
 * collisions with existing maps: overlapped regions are trimmed
 * (pos->start pushed up past map->end, or pos->end pulled down to
 * map->start), and a map trimmed to nothing (start >= end) is unlinked.
 * The printf diagnostics are verbose-mode debugging output (gating
 * condition elided in this extract).
 */
425 static void thread__insert_map(struct thread
*self
, struct map
*map
)
427 struct map
*pos
, *tmp
;
429 list_for_each_entry_safe(pos
, tmp
, &self
->maps
, node
) {
430 if (map__overlap(pos
, map
)) {
432 printf("overlapping maps:\n");
433 map__fprintf(map
, stdout
);
434 map__fprintf(pos
, stdout
);
437 if (map
->start
<= pos
->start
&& map
->end
> pos
->start
)
438 pos
->start
= map
->end
;
440 if (map
->end
>= pos
->end
&& map
->start
< pos
->end
)
441 pos
->end
= map
->start
;
444 printf("after collision:\n");
445 map__fprintf(pos
, stdout
);
448 if (pos
->start
>= pos
->end
) {
449 list_del_init(&pos
->node
);
455 list_add_tail(&map
->node
, &self
->maps
);
/*
 * thread__fork - make @self inherit @parent's state on PERF_EVENT_FORK:
 * copy the comm (strdup) and clone every map from the parent's list
 * into the child via map__clone() + thread__insert_map().  Failure
 * returns are on lines elided in this extract - TODO confirm.
 */
458 static int thread__fork(struct thread
*self
, struct thread
*parent
)
464 self
->comm
= strdup(parent
->comm
);
468 list_for_each_entry(map
, &parent
->maps
, node
) {
469 struct map
*new = map__clone(map
);
472 thread__insert_map(self
, new);
/*
 * thread__find_map - linear search of the thread's maps for one whose
 * [start,end] range contains @ip (note: end is inclusive here).
 * Presumably returns the match or NULL - return lines elided in this
 * extract.
 */
478 static struct map
*thread__find_map(struct thread
*self
, u64 ip
)
485 list_for_each_entry(pos
, &self
->maps
, node
)
486 if (ip
>= pos
->start
&& ip
<= pos
->end
)
492 static size_t threads__fprintf(FILE *fp
)
497 for (nd
= rb_first(&threads
); nd
; nd
= rb_next(nd
)) {
498 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
500 ret
+= thread__fprintf(pos
, fp
);
507 * histogram, sorted on item, collects counts
510 static struct rb_root hist
;
513 struct rb_node rb_node
;
515 struct thread
*thread
;
519 struct symbol
*parent
;
522 struct callchain_node callchain
;
523 struct rb_root sorted_chain
;
529 * configurable sorting bits
533 struct list_head list
;
537 int64_t (*cmp
)(struct hist_entry
*, struct hist_entry
*);
538 int64_t (*collapse
)(struct hist_entry
*, struct hist_entry
*);
539 size_t (*print
)(FILE *fp
, struct hist_entry
*);
542 static int64_t cmp_null(void *l
, void *r
)
555 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
557 return right
->thread
->pid
- left
->thread
->pid
;
561 sort__thread_print(FILE *fp
, struct hist_entry
*self
)
563 return fprintf(fp
, "%16s:%5d", self
->thread
->comm
?: "", self
->thread
->pid
);
566 static struct sort_entry sort_thread
= {
567 .header
= " Command: Pid",
568 .cmp
= sort__thread_cmp
,
569 .print
= sort__thread_print
,
575 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
577 return right
->thread
->pid
- left
->thread
->pid
;
581 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
583 char *comm_l
= left
->thread
->comm
;
584 char *comm_r
= right
->thread
->comm
;
586 if (!comm_l
|| !comm_r
)
587 return cmp_null(comm_l
, comm_r
);
589 return strcmp(comm_l
, comm_r
);
593 sort__comm_print(FILE *fp
, struct hist_entry
*self
)
595 return fprintf(fp
, "%16s", self
->thread
->comm
);
598 static struct sort_entry sort_comm
= {
599 .header
= " Command",
600 .cmp
= sort__comm_cmp
,
601 .collapse
= sort__comm_collapse
,
602 .print
= sort__comm_print
,
608 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
610 struct dso
*dso_l
= left
->dso
;
611 struct dso
*dso_r
= right
->dso
;
613 if (!dso_l
|| !dso_r
)
614 return cmp_null(dso_l
, dso_r
);
616 return strcmp(dso_l
->name
, dso_r
->name
);
620 sort__dso_print(FILE *fp
, struct hist_entry
*self
)
623 return fprintf(fp
, "%-25s", self
->dso
->name
);
625 return fprintf(fp
, "%016llx ", (u64
)self
->ip
);
628 static struct sort_entry sort_dso
= {
629 .header
= "Shared Object ",
630 .cmp
= sort__dso_cmp
,
631 .print
= sort__dso_print
,
637 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
641 if (left
->sym
== right
->sym
)
644 ip_l
= left
->sym
? left
->sym
->start
: left
->ip
;
645 ip_r
= right
->sym
? right
->sym
->start
: right
->ip
;
647 return (int64_t)(ip_r
- ip_l
);
651 sort__sym_print(FILE *fp
, struct hist_entry
*self
)
656 ret
+= fprintf(fp
, "%#018llx ", (u64
)self
->ip
);
659 ret
+= fprintf(fp
, "[%c] %s",
660 self
->dso
== kernel_dso
? 'k' :
661 self
->dso
== hypervisor_dso
? 'h' : '.', self
->sym
->name
);
663 if (self
->sym
->module
)
664 ret
+= fprintf(fp
, "\t[%s]", self
->sym
->module
->name
);
666 ret
+= fprintf(fp
, "%#016llx", (u64
)self
->ip
);
672 static struct sort_entry sort_sym
= {
674 .cmp
= sort__sym_cmp
,
675 .print
= sort__sym_print
,
681 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
683 struct symbol
*sym_l
= left
->parent
;
684 struct symbol
*sym_r
= right
->parent
;
686 if (!sym_l
|| !sym_r
)
687 return cmp_null(sym_l
, sym_r
);
689 return strcmp(sym_l
->name
, sym_r
->name
);
693 sort__parent_print(FILE *fp
, struct hist_entry
*self
)
697 ret
+= fprintf(fp
, "%-20s", self
->parent
? self
->parent
->name
: "[other]");
702 static struct sort_entry sort_parent
= {
703 .header
= "Parent symbol ",
704 .cmp
= sort__parent_cmp
,
705 .print
= sort__parent_print
,
708 static int sort__need_collapse
= 0;
709 static int sort__has_parent
= 0;
711 struct sort_dimension
{
713 struct sort_entry
*entry
;
717 static struct sort_dimension sort_dimensions
[] = {
718 { .name
= "pid", .entry
= &sort_thread
, },
719 { .name
= "comm", .entry
= &sort_comm
, },
720 { .name
= "dso", .entry
= &sort_dso
, },
721 { .name
= "symbol", .entry
= &sort_sym
, },
722 { .name
= "parent", .entry
= &sort_parent
, },
725 static LIST_HEAD(hist_entry__sort_list
);
727 static int sort_dimension__add(char *tok
)
731 for (i
= 0; i
< ARRAY_SIZE(sort_dimensions
); i
++) {
732 struct sort_dimension
*sd
= &sort_dimensions
[i
];
737 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
740 if (sd
->entry
->collapse
)
741 sort__need_collapse
= 1;
743 if (sd
->entry
== &sort_parent
) {
744 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
748 regerror(ret
, &parent_regex
, err
, sizeof(err
));
749 fprintf(stderr
, "Invalid regex: %s\n%s",
750 parent_pattern
, err
);
753 sort__has_parent
= 1;
756 list_add_tail(&sd
->entry
->list
, &hist_entry__sort_list
);
766 hist_entry__cmp(struct hist_entry
*left
, struct hist_entry
*right
)
768 struct sort_entry
*se
;
771 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
772 cmp
= se
->cmp(left
, right
);
781 hist_entry__collapse(struct hist_entry
*left
, struct hist_entry
*right
)
783 struct sort_entry
*se
;
786 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
787 int64_t (*f
)(struct hist_entry
*, struct hist_entry
*);
789 f
= se
->collapse
?: se
->cmp
;
791 cmp
= f(left
, right
);
799 static size_t ipchain__fprintf_graph_line(FILE *fp
, int depth
, int depth_mask
)
804 ret
+= fprintf(fp
, "%s", " ");
806 for (i
= 0; i
< depth
; i
++)
807 if (depth_mask
& (1 << i
))
808 ret
+= fprintf(fp
, "| ");
810 ret
+= fprintf(fp
, " ");
812 ret
+= fprintf(fp
, "\n");
817 ipchain__fprintf_graph(FILE *fp
, struct callchain_list
*chain
, int depth
,
818 int depth_mask
, int count
, u64 total_samples
,
824 ret
+= fprintf(fp
, "%s", " ");
825 for (i
= 0; i
< depth
; i
++) {
826 if (depth_mask
& (1 << i
))
827 ret
+= fprintf(fp
, "|");
829 ret
+= fprintf(fp
, " ");
830 if (!count
&& i
== depth
- 1) {
833 percent
= hits
* 100.0 / total_samples
;
834 ret
+= percent_color_fprintf(fp
, "--%2.2f%%-- ", percent
);
836 ret
+= fprintf(fp
, "%s", " ");
839 ret
+= fprintf(fp
, "%s\n", chain
->sym
->name
);
841 ret
+= fprintf(fp
, "%p\n", (void *)(long)chain
->ip
);
847 callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
848 u64 total_samples
, int depth
, int depth_mask
)
850 struct rb_node
*node
, *next
;
851 struct callchain_node
*child
;
852 struct callchain_list
*chain
;
853 int new_depth_mask
= depth_mask
;
858 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
859 new_total
= self
->cumul_hit
;
861 new_total
= total_samples
;
863 node
= rb_first(&self
->rb_root
);
865 child
= rb_entry(node
, struct callchain_node
, rb_node
);
868 * The depth mask manages the output of pipes that show
869 * the depth. We don't want to keep the pipes of the current
870 * level for the last child of this depth
872 next
= rb_next(node
);
874 new_depth_mask
&= ~(1 << (depth
- 1));
877 * But we keep the older depth mask for the line separator
878 * to keep the level link until we reach the last child
880 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
);
882 list_for_each_entry(chain
, &child
->val
, list
) {
883 if (chain
->ip
>= PERF_CONTEXT_MAX
)
885 ret
+= ipchain__fprintf_graph(fp
, chain
, depth
,
890 ret
+= callchain__fprintf_graph(fp
, child
, new_total
,
892 new_depth_mask
| (1 << depth
));
900 callchain__fprintf_flat(FILE *fp
, struct callchain_node
*self
,
903 struct callchain_list
*chain
;
909 ret
+= callchain__fprintf_flat(fp
, self
->parent
, total_samples
);
912 list_for_each_entry(chain
, &self
->val
, list
) {
913 if (chain
->ip
>= PERF_CONTEXT_MAX
)
916 ret
+= fprintf(fp
, " %s\n", chain
->sym
->name
);
918 ret
+= fprintf(fp
, " %p\n",
919 (void *)(long)chain
->ip
);
926 hist_entry_callchain__fprintf(FILE *fp
, struct hist_entry
*self
,
929 struct rb_node
*rb_node
;
930 struct callchain_node
*chain
;
933 rb_node
= rb_first(&self
->sorted_chain
);
937 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
938 percent
= chain
->hit
* 100.0 / total_samples
;
939 switch (callchain_param
.mode
) {
941 ret
+= percent_color_fprintf(fp
, " %6.2f%%\n",
943 ret
+= callchain__fprintf_flat(fp
, chain
, total_samples
);
945 case CHAIN_GRAPH_ABS
: /* Falldown */
946 case CHAIN_GRAPH_REL
:
947 ret
+= callchain__fprintf_graph(fp
, chain
,
948 total_samples
, 1, 1);
952 ret
+= fprintf(fp
, "\n");
953 rb_node
= rb_next(rb_node
);
961 hist_entry__fprintf(FILE *fp
, struct hist_entry
*self
, u64 total_samples
)
963 struct sort_entry
*se
;
966 if (exclude_other
&& !self
->parent
)
970 ret
= percent_color_fprintf(fp
, " %6.2f%%",
971 (self
->count
* 100.0) / total_samples
);
973 ret
= fprintf(fp
, "%12Ld ", self
->count
);
975 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
976 if (exclude_other
&& (se
== &sort_parent
))
980 ret
+= se
->print(fp
, self
);
983 ret
+= fprintf(fp
, "\n");
986 hist_entry_callchain__fprintf(fp
, self
, total_samples
);
995 static struct symbol
*
996 resolve_symbol(struct thread
*thread
, struct map
**mapp
,
997 struct dso
**dsop
, u64
*ipp
)
999 struct dso
*dso
= dsop
? *dsop
: NULL
;
1000 struct map
*map
= mapp
? *mapp
: NULL
;
1012 map
= thread__find_map(thread
, ip
);
1017 ip
= map
->map_ip(map
, ip
);
1022 * If this is outside of all known maps,
1023 * and is a negative address, try to look it
1024 * up in the kernel dso, as it might be a
1025 * vsyscall (which executes in user-mode):
1027 if ((long long)ip
< 0)
1030 dprintf(" ...... dso: %s\n", dso
? dso
->name
: "<not found>");
1031 dprintf(" ...... map: %Lx -> %Lx\n", *ipp
, ip
);
1040 return dso
->find_symbol(dso
, ip
);
1043 static int call__match(struct symbol
*sym
)
1045 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
1051 static struct symbol
**
1052 resolve_callchain(struct thread
*thread
, struct map
*map __used
,
1053 struct ip_callchain
*chain
, struct hist_entry
*entry
)
1055 u64 context
= PERF_CONTEXT_MAX
;
1056 struct symbol
**syms
= NULL
;
1060 syms
= calloc(chain
->nr
, sizeof(*syms
));
1062 fprintf(stderr
, "Can't allocate memory for symbols\n");
1067 for (i
= 0; i
< chain
->nr
; i
++) {
1068 u64 ip
= chain
->ips
[i
];
1069 struct dso
*dso
= NULL
;
1072 if (ip
>= PERF_CONTEXT_MAX
) {
1078 case PERF_CONTEXT_HV
:
1079 dso
= hypervisor_dso
;
1081 case PERF_CONTEXT_KERNEL
:
1088 sym
= resolve_symbol(thread
, NULL
, &dso
, &ip
);
1091 if (sort__has_parent
&& call__match(sym
) &&
1093 entry
->parent
= sym
;
1104 * collect histogram counts
1108 hist_entry__add(struct thread
*thread
, struct map
*map
, struct dso
*dso
,
1109 struct symbol
*sym
, u64 ip
, struct ip_callchain
*chain
,
1110 char level
, u64 count
)
1112 struct rb_node
**p
= &hist
.rb_node
;
1113 struct rb_node
*parent
= NULL
;
1114 struct hist_entry
*he
;
1115 struct symbol
**syms
= NULL
;
1116 struct hist_entry entry
= {
1125 .sorted_chain
= RB_ROOT
1129 if ((sort__has_parent
|| callchain
) && chain
)
1130 syms
= resolve_callchain(thread
, map
, chain
, &entry
);
1132 while (*p
!= NULL
) {
1134 he
= rb_entry(parent
, struct hist_entry
, rb_node
);
1136 cmp
= hist_entry__cmp(&entry
, he
);
1141 append_chain(&he
->callchain
, chain
, syms
);
1150 p
= &(*p
)->rb_right
;
1153 he
= malloc(sizeof(*he
));
1158 callchain_init(&he
->callchain
);
1159 append_chain(&he
->callchain
, chain
, syms
);
1162 rb_link_node(&he
->rb_node
, parent
, p
);
1163 rb_insert_color(&he
->rb_node
, &hist
);
1168 static void hist_entry__free(struct hist_entry
*he
)
1174 * collapse the histogram
1177 static struct rb_root collapse_hists
;
1179 static void collapse__insert_entry(struct hist_entry
*he
)
1181 struct rb_node
**p
= &collapse_hists
.rb_node
;
1182 struct rb_node
*parent
= NULL
;
1183 struct hist_entry
*iter
;
1186 while (*p
!= NULL
) {
1188 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1190 cmp
= hist_entry__collapse(iter
, he
);
1193 iter
->count
+= he
->count
;
1194 hist_entry__free(he
);
1201 p
= &(*p
)->rb_right
;
1204 rb_link_node(&he
->rb_node
, parent
, p
);
1205 rb_insert_color(&he
->rb_node
, &collapse_hists
);
1208 static void collapse__resort(void)
1210 struct rb_node
*next
;
1211 struct hist_entry
*n
;
1213 if (!sort__need_collapse
)
1216 next
= rb_first(&hist
);
1218 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1219 next
= rb_next(&n
->rb_node
);
1221 rb_erase(&n
->rb_node
, &hist
);
1222 collapse__insert_entry(n
);
1227 * reverse the map, sort on count.
1230 static struct rb_root output_hists
;
1232 static void output__insert_entry(struct hist_entry
*he
, u64 min_callchain_hits
)
1234 struct rb_node
**p
= &output_hists
.rb_node
;
1235 struct rb_node
*parent
= NULL
;
1236 struct hist_entry
*iter
;
1239 callchain_param
.sort(&he
->sorted_chain
, &he
->callchain
,
1240 min_callchain_hits
, &callchain_param
);
1242 while (*p
!= NULL
) {
1244 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1246 if (he
->count
> iter
->count
)
1249 p
= &(*p
)->rb_right
;
1252 rb_link_node(&he
->rb_node
, parent
, p
);
1253 rb_insert_color(&he
->rb_node
, &output_hists
);
1256 static void output__resort(u64 total_samples
)
1258 struct rb_node
*next
;
1259 struct hist_entry
*n
;
1260 struct rb_root
*tree
= &hist
;
1261 u64 min_callchain_hits
;
1263 min_callchain_hits
= total_samples
* (callchain_param
.min_percent
/ 100);
1265 if (sort__need_collapse
)
1266 tree
= &collapse_hists
;
1268 next
= rb_first(tree
);
1271 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1272 next
= rb_next(&n
->rb_node
);
1274 rb_erase(&n
->rb_node
, tree
);
1275 output__insert_entry(n
, min_callchain_hits
);
1279 static size_t output__fprintf(FILE *fp
, u64 total_samples
)
1281 struct hist_entry
*pos
;
1282 struct sort_entry
*se
;
1288 fprintf(fp
, "# (%Ld samples)\n", (u64
)total_samples
);
1291 fprintf(fp
, "# Overhead");
1292 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1293 if (exclude_other
&& (se
== &sort_parent
))
1295 fprintf(fp
, " %s", se
->header
);
1299 fprintf(fp
, "# ........");
1300 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1303 if (exclude_other
&& (se
== &sort_parent
))
1307 for (i
= 0; i
< strlen(se
->header
); i
++)
1314 for (nd
= rb_first(&output_hists
); nd
; nd
= rb_next(nd
)) {
1315 pos
= rb_entry(nd
, struct hist_entry
, rb_node
);
1316 ret
+= hist_entry__fprintf(fp
, pos
, total_samples
);
1319 if (sort_order
== default_sort_order
&&
1320 parent_pattern
== default_parent_pattern
) {
1322 fprintf(fp
, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
/*
 * register_idle_thread - pre-create the pid-0 thread and name it
 * "[idle]" so samples from the idle task resolve; complains to stderr
 * if either step fails.
 */
1330 static void register_idle_thread(void)
1332 struct thread
*thread
= threads__findnew(0);
1334 if (thread
== NULL
||
1335 thread__set_comm(thread
, "[idle]")) {
1336 fprintf(stderr
, "problem inserting idle task.\n");
1341 static unsigned long total
= 0,
/*
 * validate_chain - sanity-check a callchain attached to a sample event:
 * the space left in the record after the fixed ip-event fields
 * (header.size minus the offset of __more_data) must be large enough
 * to hold chain->nr u64 entries.  Guards against corrupt/truncated
 * events before the chain is walked.
 */
1348 static int validate_chain(struct ip_callchain
*chain
, event_t
*event
)
1350 unsigned int chain_size
;
1352 chain_size
= event
->header
.size
;
1353 chain_size
-= (unsigned long)&event
->ip
.__more_data
- (unsigned long)event
;
1355 if (chain
->nr
*sizeof(u64
) > chain_size
)
1362 process_sample_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1366 struct dso
*dso
= NULL
;
1367 struct thread
*thread
= threads__findnew(event
->ip
.pid
);
1368 u64 ip
= event
->ip
.ip
;
1370 struct map
*map
= NULL
;
1371 void *more_data
= event
->ip
.__more_data
;
1372 struct ip_callchain
*chain
= NULL
;
1375 if (sample_type
& PERF_SAMPLE_PERIOD
) {
1376 period
= *(u64
*)more_data
;
1377 more_data
+= sizeof(u64
);
1380 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
1381 (void *)(offset
+ head
),
1382 (void *)(long)(event
->header
.size
),
1388 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
1391 chain
= (void *)more_data
;
1393 dprintf("... chain: nr:%Lu\n", chain
->nr
);
1395 if (validate_chain(chain
, event
) < 0) {
1396 eprintf("call-chain problem with event, skipping it.\n");
1401 for (i
= 0; i
< chain
->nr
; i
++)
1402 dprintf("..... %2d: %016Lx\n", i
, chain
->ips
[i
]);
1406 dprintf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
1408 if (thread
== NULL
) {
1409 eprintf("problem processing %d event, skipping it.\n",
1410 event
->header
.type
);
1414 if (comm_list
&& !strlist__has_entry(comm_list
, thread
->comm
))
1417 cpumode
= event
->header
.misc
& PERF_EVENT_MISC_CPUMODE_MASK
;
1419 if (cpumode
== PERF_EVENT_MISC_KERNEL
) {
1425 dprintf(" ...... dso: %s\n", dso
->name
);
1427 } else if (cpumode
== PERF_EVENT_MISC_USER
) {
1436 dso
= hypervisor_dso
;
1438 dprintf(" ...... dso: [hypervisor]\n");
1441 if (show
& show_mask
) {
1442 struct symbol
*sym
= resolve_symbol(thread
, &map
, &dso
, &ip
);
1444 if (dso_list
&& dso
&& dso
->name
&& !strlist__has_entry(dso_list
, dso
->name
))
1447 if (sym_list
&& sym
&& !strlist__has_entry(sym_list
, sym
->name
))
1450 if (hist_entry__add(thread
, map
, dso
, sym
, ip
, chain
, level
, period
)) {
1451 eprintf("problem incrementing symbol count, skipping event\n");
1461 process_mmap_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1463 struct thread
*thread
= threads__findnew(event
->mmap
.pid
);
1464 struct map
*map
= map__new(&event
->mmap
);
1466 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1467 (void *)(offset
+ head
),
1468 (void *)(long)(event
->header
.size
),
1470 (void *)(long)event
->mmap
.start
,
1471 (void *)(long)event
->mmap
.len
,
1472 (void *)(long)event
->mmap
.pgoff
,
1473 event
->mmap
.filename
);
1475 if (thread
== NULL
|| map
== NULL
) {
1476 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1480 thread__insert_map(thread
, map
);
1487 process_comm_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1489 struct thread
*thread
= threads__findnew(event
->comm
.pid
);
1491 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1492 (void *)(offset
+ head
),
1493 (void *)(long)(event
->header
.size
),
1494 event
->comm
.comm
, event
->comm
.pid
);
1496 if (thread
== NULL
||
1497 thread__set_comm(thread
, event
->comm
.comm
)) {
1498 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
1507 process_fork_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1509 struct thread
*thread
= threads__findnew(event
->fork
.pid
);
1510 struct thread
*parent
= threads__findnew(event
->fork
.ppid
);
1512 dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
1513 (void *)(offset
+ head
),
1514 (void *)(long)(event
->header
.size
),
1515 event
->fork
.pid
, event
->fork
.ppid
);
1517 if (!thread
|| !parent
|| thread__fork(thread
, parent
)) {
1518 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
1527 process_period_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1529 dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
1530 (void *)(offset
+ head
),
1531 (void *)(long)(event
->header
.size
),
1534 event
->period
.sample_period
);
1540 process_lost_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1542 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1543 (void *)(offset
+ head
),
1544 (void *)(long)(event
->header
.size
),
1548 total_lost
+= event
->lost
.lost
;
1553 static void trace_event(event_t
*event
)
1555 unsigned char *raw_event
= (void *)event
;
1556 char *color
= PERF_COLOR_BLUE
;
1563 cdprintf("\n. ... raw event: size %d bytes\n", event
->header
.size
);
1565 for (i
= 0; i
< event
->header
.size
; i
++) {
1566 if ((i
& 15) == 0) {
1568 cdprintf(" %04x: ", i
);
1571 cdprintf(" %02x", raw_event
[i
]);
1573 if (((i
& 15) == 15) || i
== event
->header
.size
-1) {
1575 for (j
= 0; j
< 15-(i
& 15); j
++)
1577 for (j
= 0; j
< (i
& 15); j
++) {
1578 if (isprint(raw_event
[i
-15+j
]))
1579 cdprintf("%c", raw_event
[i
-15+j
]);
1590 process_read_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1592 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
1593 (void *)(offset
+ head
),
1594 (void *)(long)(event
->header
.size
),
1603 process_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1607 switch (event
->header
.type
) {
1608 case PERF_EVENT_SAMPLE
:
1609 return process_sample_event(event
, offset
, head
);
1611 case PERF_EVENT_MMAP
:
1612 return process_mmap_event(event
, offset
, head
);
1614 case PERF_EVENT_COMM
:
1615 return process_comm_event(event
, offset
, head
);
1617 case PERF_EVENT_FORK
:
1618 return process_fork_event(event
, offset
, head
);
1620 case PERF_EVENT_PERIOD
:
1621 return process_period_event(event
, offset
, head
);
1623 case PERF_EVENT_LOST
:
1624 return process_lost_event(event
, offset
, head
);
1626 case PERF_EVENT_READ
:
1627 return process_read_event(event
, offset
, head
);
1630 * We don't process them right now but they are fine:
1633 case PERF_EVENT_THROTTLE
:
1634 case PERF_EVENT_UNTHROTTLE
:
1644 static struct perf_header
*header
;
1646 static u64
perf_header__sample_type(void)
1648 u64 sample_type
= 0;
1651 for (i
= 0; i
< header
->attrs
; i
++) {
1652 struct perf_header_attr
*attr
= header
->attr
[i
];
1655 sample_type
= attr
->attr
.sample_type
;
1656 else if (sample_type
!= attr
->attr
.sample_type
)
1657 die("non matching sample_type");
1663 static int __cmd_report(void)
1665 int ret
, rc
= EXIT_FAILURE
;
1666 unsigned long offset
= 0;
1667 unsigned long head
, shift
;
1673 register_idle_thread();
1675 input
= open(input_name
, O_RDONLY
);
1677 fprintf(stderr
, " failed to open file: %s", input_name
);
1678 if (!strcmp(input_name
, "perf.data"))
1679 fprintf(stderr
, " (try 'perf record' first)");
1680 fprintf(stderr
, "\n");
1684 ret
= fstat(input
, &stat
);
1686 perror("failed to stat file");
1690 if (!stat
.st_size
) {
1691 fprintf(stderr
, "zero-sized file, nothing to do!\n");
1695 header
= perf_header__read(input
);
1696 head
= header
->data_offset
;
1698 sample_type
= perf_header__sample_type();
1700 if (!(sample_type
& PERF_SAMPLE_CALLCHAIN
)) {
1701 if (sort__has_parent
) {
1702 fprintf(stderr
, "selected --sort parent, but no"
1703 " callchain data. Did you call"
1704 " perf record without -g?\n");
1708 fprintf(stderr
, "selected -c but no callchain data."
1709 " Did you call perf record without"
1715 if (load_kernel() < 0) {
1716 perror("failed to load kernel symbols");
1717 return EXIT_FAILURE
;
1721 if (getcwd(__cwd
, sizeof(__cwd
)) == NULL
) {
1722 perror("failed to get the current directory");
1723 return EXIT_FAILURE
;
1725 cwdlen
= strlen(cwd
);
1731 shift
= page_size
* (head
/ page_size
);
1736 buf
= (char *)mmap(NULL
, page_size
* mmap_window
, PROT_READ
,
1737 MAP_SHARED
, input
, offset
);
1738 if (buf
== MAP_FAILED
) {
1739 perror("failed to mmap file");
1744 event
= (event_t
*)(buf
+ head
);
1746 size
= event
->header
.size
;
1750 if (head
+ event
->header
.size
>= page_size
* mmap_window
) {
1753 shift
= page_size
* (head
/ page_size
);
1755 ret
= munmap(buf
, page_size
* mmap_window
);
1763 size
= event
->header
.size
;
1765 dprintf("\n%p [%p]: event: %d\n",
1766 (void *)(offset
+ head
),
1767 (void *)(long)event
->header
.size
,
1768 event
->header
.type
);
1770 if (!size
|| process_event(event
, offset
, head
) < 0) {
1772 dprintf("%p [%p]: skipping unknown header type: %d\n",
1773 (void *)(offset
+ head
),
1774 (void *)(long)(event
->header
.size
),
1775 event
->header
.type
);
1780 * assume we lost track of the stream, check alignment, and
1781 * increment a single u64 in the hope to catch on again 'soon'.
1784 if (unlikely(head
& 7))
1792 if (offset
+ head
>= header
->data_offset
+ header
->data_size
)
1795 if (offset
+ head
< (unsigned long)stat
.st_size
)
1802 dprintf(" IP events: %10ld\n", total
);
1803 dprintf(" mmap events: %10ld\n", total_mmap
);
1804 dprintf(" comm events: %10ld\n", total_comm
);
1805 dprintf(" fork events: %10ld\n", total_fork
);
1806 dprintf(" lost events: %10ld\n", total_lost
);
1807 dprintf(" unknown events: %10ld\n", total_unknown
);
1813 threads__fprintf(stdout
);
1816 dsos__fprintf(stdout
);
1819 output__resort(total
);
1820 output__fprintf(stdout
, total
);
1826 parse_callchain_opt(const struct option
*opt __used
, const char *arg
,
1837 tok
= strtok((char *)arg
, ",");
1841 /* get the output mode */
1842 if (!strncmp(tok
, "graph", strlen(arg
)))
1843 callchain_param
.mode
= CHAIN_GRAPH_ABS
;
1845 else if (!strncmp(tok
, "flat", strlen(arg
)))
1846 callchain_param
.mode
= CHAIN_FLAT
;
1848 else if (!strncmp(tok
, "fractal", strlen(arg
)))
1849 callchain_param
.mode
= CHAIN_GRAPH_REL
;
1854 /* get the min percentage */
1855 tok
= strtok(NULL
, ",");
1859 callchain_param
.min_percent
= strtod(tok
, &endptr
);
1864 if (register_callchain_param(&callchain_param
) < 0) {
1865 fprintf(stderr
, "Can't register callchain params\n");
1871 static const char * const report_usage
[] = {
1872 "perf report [<options>] <command>",
1876 static const struct option options
[] = {
1877 OPT_STRING('i', "input", &input_name
, "file",
1879 OPT_BOOLEAN('v', "verbose", &verbose
,
1880 "be more verbose (show symbol address, etc)"),
1881 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
1882 "dump raw trace in ASCII"),
1883 OPT_STRING('k', "vmlinux", &vmlinux
, "file", "vmlinux pathname"),
1884 OPT_BOOLEAN('m', "modules", &modules
,
1885 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1886 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
1887 "sort by key(s): pid, comm, dso, symbol, parent"),
1888 OPT_BOOLEAN('P', "full-paths", &full_paths
,
1889 "Don't shorten the pathnames taking into account the cwd"),
1890 OPT_STRING('p', "parent", &parent_pattern
, "regex",
1891 "regex filter to identify parent, see: '--sort parent'"),
1892 OPT_BOOLEAN('x', "exclude-other", &exclude_other
,
1893 "Only display entries with parent-match"),
1894 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL
, "output_type,min_percent",
1895 "Display callchains using output_type and min percent threshold. "
1896 "Default: fractal,0.5", &parse_callchain_opt
, callchain_default_opt
),
1897 OPT_STRING('d', "dsos", &dso_list_str
, "dso[,dso...]",
1898 "only consider symbols in these dsos"),
1899 OPT_STRING('C', "comms", &comm_list_str
, "comm[,comm...]",
1900 "only consider symbols in these comms"),
1901 OPT_STRING('S', "symbols", &sym_list_str
, "symbol[,symbol...]",
1902 "only consider these symbols"),
/*
 * setup_sorting - split the --sort string on ',' / ' ' (strtok_r over a
 * strdup'd copy, so the original option string is untouched) and
 * register each key with sort_dimension__add(); unknown keys abort with
 * usage.  NOTE(review): the strdup'd 'str' appears not to be freed in
 * this fragment - the free may be on an elided line; confirm.
 */
1906 static void setup_sorting(void)
1908 char *tmp
, *tok
, *str
= strdup(sort_order
);
1910 for (tok
= strtok_r(str
, ", ", &tmp
);
1911 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
1912 if (sort_dimension__add(tok
) < 0) {
1913 error("Unknown --sort key: `%s'", tok
);
1914 usage_with_options(report_usage
, options
);
/*
 * setup_list - build a strlist from a comma-separated option string
 * (for the --dsos/--comms/--symbols filters); @list_name is only used
 * in the parse-failure diagnostic.
 */
1921 static void setup_list(struct strlist
**list
, const char *list_str
,
1922 const char *list_name
)
1925 *list
= strlist__new(true, list_str
);
1927 fprintf(stderr
, "problems parsing %s list\n",
/*
 * cmd_report - entry point for 'perf report': cache the page size,
 * parse command-line options, implicitly add the "parent" sort key when
 * a non-default --parent pattern was given, reject stray arguments,
 * build the dso/comm/symbol filter lists, then hand off to
 * __cmd_report() for the actual perf.data processing.
 */
1934 int cmd_report(int argc
, const char **argv
, const char *prefix __used
)
1938 page_size
= getpagesize();
1940 argc
= parse_options(argc
, argv
, options
, report_usage
, 0);
1944 if (parent_pattern
!= default_parent_pattern
)
1945 sort_dimension__add("parent");
1950 * Any (unrecognized) arguments left?
1953 usage_with_options(report_usage
, options
);
1955 setup_list(&dso_list
, dso_list_str
, "dso");
1956 setup_list(&comm_list
, comm_list_str
, "comm");
1957 setup_list(&sym_list
, sym_list_str
, "symbol");
1961 return __cmd_report();