perf annotate: Move locking to struct annotation
tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

u16 hists__col_len(struct hists *self, enum hist_column col)
{
	return self->col_len[col];
}

void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
{
	self->col_len[col] = len;
}

bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
{
	if (len > hists__col_len(self, col)) {
		hists__set_col_len(self, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *self)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(self, col, 0);
}

static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(self, HISTC_COMM, len))
		hists__set_col_len(self, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(self, HISTC_DSO, len);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}

/*
 * histogram, sorted on item, collects periods
 */

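/*
 * The callchain_root lives in the same allocation, appended right after
 * the hist_entry, so self->callchain points past the end of the struct;
 * the extra space is only reserved when callchains are in use.
 */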
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		if (self->ms.map)
			self->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(self, h);
		++self->nr_entries;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

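/*
 * Add a sample to the sorted rbtree of entries: when an entry that
 * compares equal under the configured sort keys already exists, fold
 * this period into it; otherwise allocate a new entry and link it in.
 */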
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

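/*
 * Like hist_entry__cmp(), but for the collapse pass: a sort key may
 * provide a looser se_collapse comparator, and falls back to its
 * regular se_cmp when it does not.
 */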
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

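/*
 * Returns true if @he was linked into @root, false if it compared
 * equal to an existing entry, in which case its period (and callchain,
 * when enabled) is merged into that entry and @he itself is freed.
 */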
static bool hists__collapse_insert_entry(struct hists *self,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&self->callchain_cursor);
				callchain_merge(&self->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}

void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		if (hists__collapse_insert_entry(self, &tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

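/*
 * Rebuild the rbtree ordered by period, descending. A typical caller,
 * perf report for instance, drives the passes in this order:
 *
 *	__hists__add_entry()      - once per sample
 *	hists__collapse_resort()  - merge entries the sort keys consider equal
 *	hists__output_resort()    - re-sort by period for display
 *	hists__fprintf()          - print the result
 */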
void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

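/*
 * Set up the synthetic "[...]" symbol used to account for callchain
 * branches that fall below min_percent. The malloc() reserves
 * sizeof(*rem_sq_bracket) + 6 bytes so the five-character name plus its
 * NUL terminator fit in the name[] array at the end of struct symbol.
 */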
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

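/*
 * Recursively print one level of the callchain graph. Bit i of
 * depth_mask records whether a "|" connector still needs to be drawn
 * at depth i; the bit for the current depth is cleared once the last
 * child (and any filtered remainder) has been printed.
 */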
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

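/*
 * Format one entry into the buffer @s. With @pair_hists set, the
 * percentages come from the paired (baseline) entry and total, which
 * turns the Delta column in hists__fprintf() into a comparison between
 * two sessions; otherwise they are computed against @session_total.
 */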
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];
	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}

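/*
 * Print the whole histogram: the column header line, a dotted ruler
 * (unless a field separator is in use), then one line per entry,
 * optionally followed by its callchain.
 */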
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(self, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(self, se->se_width_idx, width))
			width = hists__col_len(self, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(self, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, self, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, self, fp,
							     self->stats.total_period);
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *self)
{
	struct sort_entry *se;
	int ret = 9;	/* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7;	/* count_sys % */
		ret += 6;	/* count_us % */
		if (perf_guest) {
			ret += 13;	/* count_guest_sys % */
			ret += 12;	/* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(self, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

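/*
 * h->filtered is a bitmask with one bit per enum hist_filter value, so
 * an entry stays hidden until every filter bit is cleared:
 *
 *	h->filtered |= (1 << HIST_FILTER__DSO);		hidden by the DSO filter
 *	h->filtered &= ~(1 << HIST_FILTER__DSO);	DSO filter accepts it again
 */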
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++self->nr_entries;
	if (h->ms.unfolded)
		self->nr_entries += h->nr_rows;
	h->row_offset = 0;
	self->stats.total_period += h->period;
	self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(self, h);
}

void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
	}
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
	}
}

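/*
 * Thin wrappers tying hist entries to the annotation code; the commit
 * shown here ("perf annotate: Move locking to struct annotation")
 * moves the per-symbol locking these routines rely on into struct
 * annotation itself.
 */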
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

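/*
 * nr_events[0] doubles as the running total across all event types:
 * each recorded event bumps both the total and its own type's slot.
 */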
void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name = perf_event__name(i);

		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       self->stats.nr_events[i]);
	}

	return ret;
}