perf hists: Move the dso and thread filters from hist_browser
[deliverable/linux.git] / tools / perf / util / hist.c
1 #include "annotate.h"
2 #include "util.h"
3 #include "build-id.h"
4 #include "hist.h"
5 #include "session.h"
6 #include "sort.h"
7 #include <math.h>
8
/*
 * Bit positions used in hist_entry->filtered: each active filter kind
 * sets its own bit, so an entry is visible only when all bits are clear.
 */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};
14
/* Global callchain rendering defaults: relative graph, 0.5% cutoff, callee order. */
struct callchain_param callchain_param = {
	.mode = CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order = ORDER_CALLEE
};
20
/* Return the current display width of column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
25
/* Unconditionally set the display width of column @col to @len. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
30
31 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
32 {
33 if (len > hists__col_len(hists, col)) {
34 hists__set_col_len(hists, col, len);
35 return true;
36 }
37 return false;
38 }
39
40 static void hists__reset_col_len(struct hists *hists)
41 {
42 enum hist_column col;
43
44 for (col = 0; col < HISTC_NR_COLS; ++col)
45 hists__set_col_len(hists, col, 0);
46 }
47
/*
 * Widen the column widths so that entry @h fits: symbol name, comm,
 * thread and DSO columns are all considered.
 */
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
	else {
		/* Unresolved addresses print as hex: one digit per nibble. */
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		/* Only auto-widen when the user hasn't fixed widths/filters. */
		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(hists, HISTC_DSO,
					   unresolved_col_width);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		/* +6 extra chars for the thread column — presumably room for
		 * the pid suffix; TODO confirm against the sort key printer. */
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}
}
73
/*
 * Account @period into the per-cpumode bucket of @self (sys/us and
 * their guest counterparts). Unknown cpumodes are silently ignored;
 * the period is still counted in self->period by the caller.
 */
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}
94
95 static void hist_entry__decay(struct hist_entry *he)
96 {
97 he->period = (he->period * 7) / 8;
98 he->nr_events = (he->nr_events * 7) / 8;
99 }
100
/*
 * Decay one entry, keeping hists->stats.total_period consistent.
 * Returns true when the entry's period is (or has become) zero, i.e.
 * the caller may remove it.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	/* Already fully decayed, nothing to subtract/add back. */
	if (he->period == 0)
		return true;
	hists->stats.total_period -= he->period;
	hist_entry__decay(he);
	hists->stats.total_period += he->period;
	return he->period == 0;
}
110
/*
 * Walk the sorted entries, decay each one, and remove entries that are
 * fully decayed (or match a zap request) and not currently in use by
 * the browser. @zap_user/@zap_kernel force-remove user/kernel entries.
 */
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			/* Also unlink from the collapsed tree when in use. */
			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
139
140 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
141 {
142 return __hists__decay_entries(hists, zap_user, zap_kernel, false);
143 }
144
145 void hists__decay_entries_threaded(struct hists *hists,
146 bool zap_user, bool zap_kernel)
147 {
148 return __hists__decay_entries(hists, zap_user, zap_kernel, true);
149 }
150
151 /*
152 * histogram, sorted on item, collects periods
153 */
154
/*
 * Allocate a new hist_entry initialized from @template. When callchains
 * are enabled the callchain root is allocated as trailing storage in the
 * same malloc block. Returns NULL on allocation failure.
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		/* Mark the map referenced so it isn't discarded under us. */
		if (self->ms.map)
			self->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}
171
/*
 * Account a newly visible (unfiltered) entry: bump the entry count,
 * total period, and widen columns to fit it.
 */
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}
180
181 static u8 symbol__parent_filter(const struct symbol *parent)
182 {
183 if (symbol_conf.exclude_other && parent == NULL)
184 return 1 << HIST_FILTER__PARENT;
185 return 0;
186 }
187
/*
 * Add a sample at @al with weight @period to @hists. If an equal entry
 * (per hist_entry__cmp) already exists in the current input tree, its
 * period/event counters are bumped; otherwise a new entry is allocated
 * and inserted. Returns the entry, or NULL on allocation failure.
 * Takes hists->lock to synchronize with the reader rotating entries_in.
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* Stack template compared against existing entries before allocating. */
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.period = period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			/* Existing matching entry: just accumulate. */
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	/* Attribute the period to the right cpumode bucket too. */
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
244
/*
 * Compare two entries using the configured sort keys, in order.
 * Returns the first non-zero comparison result, or 0 if equal on all keys.
 */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
259
/*
 * Like hist_entry__cmp(), but each sort key may provide a dedicated
 * collapse comparator; fall back to the regular comparator when absent.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		/* GNU ?: — use se_collapse when non-NULL, else se_cmp. */
		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
278
/* Release an entry; callchain storage is part of the same allocation. */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
283
284 /*
285 * collapse the histogram
286 */
287
/*
 * Insert @he into the collapsed tree @root. If an equal entry (per
 * hist_entry__collapse) already exists, merge @he into it (periods,
 * event counts and callchains) and free @he, returning false.
 * Returns true when @he was actually linked into the tree.
 */
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				/* Merge he's callchain into the survivor. */
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
325
/*
 * Atomically hand the current input tree to the caller and switch
 * producers to the other tree of the two-element entries_in_array,
 * so collapsing can proceed while new samples keep arriving.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	/* Flip between entries_in_array[0] and [1]. */
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
340
/*
 * Drain the rotated input tree and merge its entries into the
 * collapsed tree, recomputing nr_entries/total_period via
 * hists__inc_nr_entries() for every entry that survives the merge.
 * No-op unless collapsing is needed or we're in threaded mode.
 */
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);
	hists->stats.total_period = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		/* Only count entries that were linked, not merged-and-freed. */
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n))
			hists__inc_nr_entries(hists, n);
	}
}
363
364 void hists__collapse_resort(struct hists *hists)
365 {
366 return __hists__collapse_resort(hists, false);
367 }
368
369 void hists__collapse_resort_threaded(struct hists *hists)
370 {
371 return __hists__collapse_resort(hists, true);
372 }
373
374 /*
375 * reverse the map, sort on period.
376 */
377
/*
 * Insert @he into the output tree @entries, ordered by descending
 * period. Also sorts the entry's callchain (pruned at
 * @min_callchain_hits) when callchains are enabled.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* Bigger periods go left so rb_first() yields the hottest. */
		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
403
/*
 * Rebuild hists->entries (the display tree, sorted by period) from
 * either the collapsed tree or the raw input tree, recomputing entry
 * counts and column widths along the way.
 */
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	/* Callchain branches below this share of the total get pruned. */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
432
433 void hists__output_resort(struct hists *hists)
434 {
435 return __hists__output_resort(hists, false);
436 }
437
438 void hists__output_resort_threaded(struct hists *hists)
439 {
440 return __hists__output_resort(hists, true);
441 }
442
/*
 * Print one leading space plus @left_margin padding spaces.
 * Returns the number of characters written (left_margin + 1).
 * Uses a single fprintf with a "%*s" field width instead of the
 * original one-fprintf-per-space loop.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	/* Callers can pass a negative margin; treat it as zero, matching
	 * the original loop which simply didn't iterate. */
	if (left_margin < 0)
		left_margin = 0;

	return fprintf(fp, " %*s", left_margin, "");
}
453
/*
 * Print one separator line of the callchain graph: the left margin,
 * then a '|' column for every depth level still active in @depth_mask.
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}
470
/*
 * Print one callchain graph node line: the active '|' columns up to
 * @depth, a "--NN.NN%--" percentage branch marker on the first line of
 * a child (@period == 0 at the deepest level), then the symbol name or
 * raw address. Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
500
/* Synthetic "[...]" symbol used to represent pruned/remaining hits. */
static struct symbol *rem_sq_bracket;
/* Pseudo callchain node pointing at rem_sq_bracket. */
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	/* +6 bytes for "[...]" plus NUL — assumes struct symbol ends in a
	 * flexible name[] array; TODO confirm against symbol.h. */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
515
/*
 * Recursively print the callchain graph rooted at @self. In
 * CHAIN_GRAPH_REL mode percentages are relative to this node's
 * children_hit; otherwise to @total_samples. Remaining (pruned) hits
 * are shown as a trailing "[...]" pseudo entry. Returns characters
 * written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		/* Recurse into the child's own children one level deeper. */
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	/* Show what was pruned below min_percent as a "[...]" branch. */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
594
/*
 * Print the top of a callchain graph: the straight-line prefix of
 * @self's own value list (prefixed by a "|\n ---" connector), then the
 * branching children via __callchain__fprintf_graph(). Returns
 * characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		/* When sorting by symbol, the first chain entry repeats the
		 * hist entry's own symbol, so skip it. */
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			/* Subsequent lines align under the "---" connector. */
			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
632
/*
 * Print a callchain in flat mode: recurse to the root first so the
 * chain prints outermost caller first, skipping PERF_CONTEXT_* marker
 * entries. Returns characters written.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		/* Context markers (kernel/user boundaries) are not frames. */
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}
657
658 static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
659 u64 total_samples, int left_margin)
660 {
661 struct rb_node *rb_node;
662 struct callchain_node *chain;
663 size_t ret = 0;
664 u32 entries_printed = 0;
665
666 rb_node = rb_first(&self->sorted_chain);
667 while (rb_node) {
668 double percent;
669
670 chain = rb_entry(rb_node, struct callchain_node, rb_node);
671 percent = chain->hit * 100.0 / total_samples;
672 switch (callchain_param.mode) {
673 case CHAIN_FLAT:
674 ret += percent_color_fprintf(fp, " %6.2f%%\n",
675 percent);
676 ret += callchain__fprintf_flat(fp, chain, total_samples);
677 break;
678 case CHAIN_GRAPH_ABS: /* Falldown */
679 case CHAIN_GRAPH_REL:
680 ret += callchain__fprintf_graph(fp, chain, total_samples,
681 left_margin);
682 case CHAIN_NONE:
683 default:
684 break;
685 }
686 ret += fprintf(fp, "\n");
687 if (++entries_printed == callchain_param.print_limit)
688 break;
689 rb_node = rb_next(rb_node);
690 }
691
692 return ret;
693 }
694
695 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
696 {
697 struct rb_node *next = rb_first(&hists->entries);
698 struct hist_entry *n;
699 int row = 0;
700
701 hists__reset_col_len(hists);
702
703 while (next && row++ < max_rows) {
704 n = rb_entry(next, struct hist_entry, rb_node);
705 hists__calc_col_len(hists, n);
706 next = rb_next(&n->rb_node);
707 }
708 }
709
/*
 * Format the numeric (percentage/count/period/delta) prefix of one
 * report line into @s. When @pair_hists is given, values come from the
 * baseline (self->pair) and a Delta/Displacement column is appended.
 * Returns the number of characters written (0 when the entry is
 * excluded by --exclude-other).
 */
static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 session_total)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		/* Baseline mode: report the paired entry's numbers (0 if
		 * the entry has no counterpart in the baseline). */
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		/* No total period: print the raw period instead of a %. */
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* Deltas below 0.01% print as blank. */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}
822
/*
 * Format the sort-key columns (comm, dso, symbol, ...) of @he into @s,
 * separated by symbol_conf.field_sep or padded to the column widths.
 * Returns the number of characters written.
 */
int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}
841
/*
 * Print one complete report line for @he to @fp: colored percentage
 * prefix followed by the sort-key columns, truncated to @size (or the
 * internal 512-byte buffer if @size is 0 or too large).
 */
int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, session_total);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}
858
/*
 * Print @self's callchains, aligned under the comm column when sorting
 * by comm (margin = comm column width minus the comm length; note this
 * can go negative for long comms — handled by the margin printer).
 */
static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}
875
/*
 * Print the whole report: optional two-line header (column titles plus
 * dotted underline), then every unfiltered entry with its optional
 * callchain, stopping after @max_rows rows (0 = unlimited). When @pair
 * is given, Baseline/Delta (and optionally Displacement) columns are
 * shown. Returns characters written.
 *
 * NOTE(review): ret accumulates only some header fprintf calls (the
 * Overhead/Samples prints are not added) — the return value appears to
 * be a rough count only; confirm before relying on it.
 */
size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	/* Allocates the "[...]" pseudo symbol; freed at out:. */
	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* Sort-key column titles; also applies -w/--column-widths here. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	/* Dotted underline row matching the columns above. */
	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			/* How far the entry moved vs. its baseline position. */
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, fp, hists->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, fp,
							     hists->stats.total_period);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		/* In very verbose mode, dump the thread's maps for entries
		 * whose address could not be mapped. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}
1039
1040 /*
1041 * See hists__fprintf to match the column widths
1042 */
/*
 * Compute the total line width of a report row; the per-column
 * constants must stay in sync with what hists__fprintf() prints.
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	/* Each sort key column plus its separating space(s). */
	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}
1072
/*
 * Clear @filter's bit on @h; when no filters remain, re-account the
 * entry in hists' counters (nr_entries, total period, sample count)
 * and widen columns to fit it again.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	/* Still hidden by some other filter: nothing to re-account. */
	if (h->filtered)
		return;

	++hists->nr_entries;
	/* Unfolded entries contribute their expanded rows too. */
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}
1089
/*
 * Re-apply hists->dso_filter to every entry: entries whose map's dso
 * doesn't match get the DSO filter bit; matching entries have it
 * cleared and are re-accounted. Resets the aggregate stats first.
 */
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists->dso_filter != NULL &&
		    (h->ms.map == NULL || h->ms.map->dso != hists->dso_filter)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
1113
/*
 * Re-apply hists->thread_filter to every entry, mirroring
 * hists__filter_by_dso() but keyed on the entry's thread.
 */
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists->thread_filter != NULL &&
		    h->thread != hists->thread_filter) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
1134
/* Record a sample at @ip against @he's symbol for annotation (event @evidx). */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
1139
/* Disassemble/annotate @he's symbol; @privsize is per-line private data. */
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
1144
/* Count one event of @type; slot 0 doubles as the grand total. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}
1150
1151 size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
1152 {
1153 int i;
1154 size_t ret = 0;
1155
1156 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1157 const char *name;
1158
1159 if (hists->stats.nr_events[i] == 0)
1160 continue;
1161
1162 name = perf_event__name(i);
1163 if (!strcmp(name, "UNKNOWN"))
1164 continue;
1165
1166 ret += fprintf(fp, "%16s events: %10d\n", name,
1167 hists->stats.nr_events[i]);
1168 }
1169
1170 return ret;
1171 }
1172
/*
 * Zero-initialize a hists structure: empty input/collapsed/output
 * rb-trees and a fresh mutex protecting entries_in rotation.
 */
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
This page took 0.078036 seconds and 6 git commands to generate.