Documentation/perf_counter/builtin-report.c
/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char const	*input_name = "perf.data";
static char		*vmlinux = NULL;
static char		*sort_order = "comm,dso";
static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)

static int		verbose;
static int		full_paths;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;

const char *perf_event_names[] = {
	[PERF_EVENT_MMAP]   = " PERF_EVENT_MMAP",
	[PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
	[PERF_EVENT_COMM]   = " PERF_EVENT_COMM",
};

struct ip_event {
	struct perf_event_header header;
	__u64 ip;
	__u32 pid, tid;
};
struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};
struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
} event_t;

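/*
 * Bookkeeping for all DSOs (executables and shared objects) seen in the
 * profile: "dsos" is a simple list searched linearly by name, and
 * kernel_dso holds the kernel image, loaded separately via load_kernel().
 */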
static LIST_HEAD(dsos);
static struct dso *kernel_dso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

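/*
 * Look up a DSO by name, creating it and loading its symbol table on
 * first use. Returns NULL if the file cannot be opened; a DSO with no
 * symbols is still added (and warned about when --verbose is given).
 */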
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		return NULL;

	nr = dso__load(dso, NULL);
	if (nr < 0) {
		fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr && verbose) {
		fprintf(stderr,
			"No symbols found in: %s, maybe install a debug package?\n",
			name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

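/*
 * Load the kernel symbol table into kernel_dso. "vmlinux" is the image
 * given with -k/--vmlinux; it may be NULL, in which case dso__load_kernel
 * decides what to load on its own.
 */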
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	return err;
}

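/*
 * Return the length of the common prefix of "pathname" and the current
 * working directory, e.g. strcommon("/home/user/bin/ls", "/home/user", 10)
 * returns 10. Used below to print mappings under the cwd as "./...".
 */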
static int strcommon(const char *pathname, const char *cwd, int cwdlen)
{
	int n = 0;

	while (n < cwdlen && pathname[n] == cwd[n])
		++n;

	return n;
}

struct map {
	struct list_head node;
	uint64_t	 start;
	uint64_t	 end;
	uint64_t	 pgoff;
	struct dso	 *dso;
};

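/*
 * Build a map from a PERF_EVENT_MMAP record. When a cwd is given (the
 * default, unless --full-paths is used), filenames living under the cwd
 * are shortened to "./<relative path>" before the DSO lookup.
 */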
static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];

		if (cwd) {
			int n = strcommon(filename, cwd, cwdlen);
			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

struct thread;

struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}

static void thread__insert_map(struct thread *self, struct map *map)
{
	list_add_tail(&map->node, &self->maps);
}

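/*
 * Find the map covering instruction pointer "ip" in this thread's
 * address space; returns NULL when the ip falls outside every known
 * mapping (or when the thread itself is unknown).
 */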
static struct map *thread__find_map(struct thread *self, uint64_t ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node	 rb_node;

	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	uint64_t	 ip;
	char		 level;

	uint32_t	 count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = " Command: Pid ",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};

/* --sort comm */

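/*
 * Note: the primary comparison is by pid, so during collection each
 * thread keeps its own entry even when several threads share a comm;
 * the collapse pass below then merges entries whose comm strings match.
 */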
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, " %16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= " Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, " %-25s", self->dso->name);

	return fprintf(fp, " %016llx", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = " Shared Object ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, " %#018llx", (__u64)self->ip);

	if (self->dso)
		ret += fprintf(fp, " %s: ", self->dso->name);
	else
		ret += fprintf(fp, " %#016llx: ", (__u64)self->ip);

	if (self->sym)
		ret += fprintf(fp, "%s", self->sym->name);
	else
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);

	return ret;
}

static struct sort_entry sort_sym = {
	.header = " Shared Object: Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};

static int sort__need_collapse = 0;

struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
};

static LIST_HEAD(hist_entry__sort_list);

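/*
 * Map one --sort token onto a sort_entry and append it to the sort list.
 * Matching uses strncasecmp() over the token length, so keys may be
 * abbreviated: "sym" selects "symbol", "co" selects "comm" (the first
 * match in table order wins). Unknown keys return -ESRCH.
 */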
static int sort_dimension__add(char *tok)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		ret = fprintf(fp, " %6.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12d ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list)
		ret += se->print(fp, self);

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * collect histogram counts
 */

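/*
 * Account one sample: walk the rb-tree ordered by the active sort keys;
 * if an existing entry compares equal its count is bumped, otherwise a
 * new entry with count 1 is inserted.
 */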
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, uint64_t ip, char level)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= 1,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count++;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

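/*
 * When a key with a collapse rule (currently only "comm") is in use, the
 * per-thread entries are re-inserted into collapse_hists so that entries
 * comparing equal under hist_entry__collapse() are merged, summing their
 * counts.
 */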
static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n);
	}
}

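/*
 * Print the final report: an "Overhead" percentage column followed by one
 * column per active sort key, each underlined with dots, then one line per
 * histogram entry in descending count order.
 */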
static size_t output__fprintf(FILE *fp, uint64_t total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list)
		fprintf(fp, " %s", se->header);
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int i;

		fprintf(fp, " ");
		for (i = 0; i < strlen(se->header)-1; i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	return ret;
}

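/*
 * Pre-register pid 0 (the idle task) with a synthetic "[idle]" comm, so
 * that samples hitting the idle loop report a sensible command name.
 */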
static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}


static int __cmd_report(void)
{
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	char *buf;
	event_t *event;
	int ret, rc = EXIT_FAILURE;
	uint32_t size;
	unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;
	char cwd[PATH_MAX], *cwdp = cwd;
	int cwdlen;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(cwd, sizeof(cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwdp = NULL;
		cwdlen = 0;
	}
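	/*
	 * perf.data is read through a sliding window of mmap_window pages.
	 * Whenever the next event header would cross the end of the window,
	 * the window is unmapped and re-mapped at a page-aligned offset just
	 * below the current position, and parsing resumes.
	 */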
remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;
	if (!size)
		goto broken_event;

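	/*
	 * Sample (IP) events carry the PERF_EVENT_MISC_OVERFLOW bit in
	 * header.misc rather than a distinct record type; the remaining misc
	 * bits say whether the sample hit kernel, user or hypervisor context.
	 */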
	if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
		char level;
		int show = 0;
		struct dso *dso = NULL;
		struct thread *thread = threads__findnew(event->ip.pid);
		uint64_t ip = event->ip.ip;
		struct map *map = NULL;

		dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.misc,
			event->ip.pid,
			(void *)(long)ip);

		if (thread == NULL) {
			fprintf(stderr, "problem processing %d event, skipping it.\n",
				event->header.type);
			goto broken_event;
		}

		dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

		if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
			show = SHOW_KERNEL;
			level = 'k';

			dso = kernel_dso;

			dprintf(" ...... dso: %s\n", dso->name);

		} else if (event->header.misc & PERF_EVENT_MISC_USER) {

			show = SHOW_USER;
			level = '.';

			map = thread__find_map(thread, ip);
			if (map != NULL) {
				dso = map->dso;
				ip -= map->start + map->pgoff;
			} else {
				/*
				 * If this is outside of all known maps,
				 * and is a negative address, try to look it
				 * up in the kernel dso, as it might be a
				 * vsyscall (which executes in user-mode):
				 */
				if ((long long)ip < 0)
					dso = kernel_dso;
			}
			dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

		} else {
			show = SHOW_HV;
			level = 'H';
			dprintf(" ...... dso: [hypervisor]\n");
		}

		if (show & show_mask) {
			struct symbol *sym = dso__find_symbol(dso, ip);

			if (hist_entry__add(thread, map, dso, sym, ip, level)) {
				fprintf(stderr,
					"problem incrementing symbol count, skipping event\n");
				goto broken_event;
			}
		}
		total++;
	} else switch (event->header.type) {
	case PERF_EVENT_MMAP: {
		struct thread *thread = threads__findnew(event->mmap.pid);
		struct map *map = map__new(&event->mmap, cwdp, cwdlen);

		dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			(void *)(long)event->mmap.start,
			(void *)(long)event->mmap.len,
			(void *)(long)event->mmap.pgoff,
			event->mmap.filename);

		if (thread == NULL || map == NULL) {
			if (verbose)
				fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n");
			goto broken_event;
		}
		thread__insert_map(thread, map);
		total_mmap++;
		break;
	}
	case PERF_EVENT_COMM: {
		struct thread *thread = threads__findnew(event->comm.pid);

		dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->comm.comm, event->comm.pid);

		if (thread == NULL ||
		    thread__set_comm(thread, event->comm.comm)) {
			fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n");
			goto broken_event;
		}
		total_comm++;
		break;
	}
	default: {
broken_event:
		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf(" IP events: %10ld\n", total);
	dprintf(" mmap events: %10ld\n", total_mmap);
	dprintf(" comm events: %10ld\n", total_comm);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();
	output__fprintf(stdout, total);

	return rc;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten pathnames that fall under the current working directory"),
	OPT_END()
};
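/*
 * Typical invocations (illustrative only):
 *
 *	perf report                                  # report ./perf.data, sorted by comm,dso
 *	perf report -i other.data -s comm,dso,symbol -k /boot/vmlinux
 *
 * Sort keys may be abbreviated, see sort_dimension__add() above.
 */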

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}

int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	setup_pager();

	return __cmd_report();
}