Merge branch 'for-linus-3.6' of git://dev.laptop.org/users/dilinger/linux-olpc
[deliverable/linux.git] tools/perf/util/hist.c
1 #include "annotate.h"
2 #include "util.h"
3 #include "build-id.h"
4 #include "hist.h"
5 #include "session.h"
6 #include "sort.h"
7 #include <math.h>
8
9 static bool hists__filter_entry_by_dso(struct hists *hists,
10 struct hist_entry *he);
11 static bool hists__filter_entry_by_thread(struct hists *hists,
12 struct hist_entry *he);
13 static bool hists__filter_entry_by_symbol(struct hists *hists,
14 struct hist_entry *he);
15
16 enum hist_filter {
17 HIST_FILTER__DSO,
18 HIST_FILTER__THREAD,
19 HIST_FILTER__PARENT,
20 HIST_FILTER__SYMBOL,
21 };
22
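/*
 * Default callchain rendering parameters, matching the initializer
 * below: relative graph mode (CHAIN_GRAPH_REL), a 0.5% minimum
 * percentage, and callee ordering.
 */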
23 struct callchain_param callchain_param = {
24 .mode = CHAIN_GRAPH_REL,
25 .min_percent = 0.5,
26 .order = ORDER_CALLEE
27 };
28
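/*
 * Per-hists column width bookkeeping: each column grows to the widest
 * string seen so far, and hists__new_col_len() only widens a column
 * (returning true) when the new length exceeds the current one.
 */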
29 u16 hists__col_len(struct hists *hists, enum hist_column col)
30 {
31 return hists->col_len[col];
32 }
33
34 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
35 {
36 hists->col_len[col] = len;
37 }
38
39 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
40 {
41 if (len > hists__col_len(hists, col)) {
42 hists__set_col_len(hists, col, len);
43 return true;
44 }
45 return false;
46 }
47
48 static void hists__reset_col_len(struct hists *hists)
49 {
50 enum hist_column col;
51
52 for (col = 0; col < HISTC_NR_COLS; ++col)
53 hists__set_col_len(hists, col, 0);
54 }
55
56 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
57 {
58 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
59
60 if (hists__col_len(hists, dso) < unresolved_col_width &&
61 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
62 !symbol_conf.dso_list)
63 hists__set_col_len(hists, dso, unresolved_col_width);
64 }
65
66 static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
67 {
68 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
69 u16 len;
70
71 if (h->ms.sym)
72 hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
73 else
74 hists__set_unres_dso_col_len(hists, HISTC_DSO);
75
76 len = thread__comm_len(h->thread);
77 if (hists__new_col_len(hists, HISTC_COMM, len))
78 hists__set_col_len(hists, HISTC_THREAD, len + 6);
79
80 if (h->ms.map) {
81 len = dso__name_len(h->ms.map->dso);
82 hists__new_col_len(hists, HISTC_DSO, len);
83 }
84
85 if (h->branch_info) {
86 int symlen;
87 /*
88 * +4 accounts for '[x] ' priv level info
 89 * +2 accounts for the 0x prefix on raw addresses
90 */
91 if (h->branch_info->from.sym) {
92 symlen = (int)h->branch_info->from.sym->namelen + 4;
93 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
94
95 symlen = dso__name_len(h->branch_info->from.map->dso);
96 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
97 } else {
98 symlen = unresolved_col_width + 4 + 2;
99 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
100 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
101 }
102
103 if (h->branch_info->to.sym) {
104 symlen = (int)h->branch_info->to.sym->namelen + 4;
105 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
106
107 symlen = dso__name_len(h->branch_info->to.map->dso);
108 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
109 } else {
110 symlen = unresolved_col_width + 4 + 2;
111 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
112 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
113 }
114 }
115 }
116
117 static void hist_entry__add_cpumode_period(struct hist_entry *he,
118 unsigned int cpumode, u64 period)
119 {
120 switch (cpumode) {
121 case PERF_RECORD_MISC_KERNEL:
122 he->period_sys += period;
123 break;
124 case PERF_RECORD_MISC_USER:
125 he->period_us += period;
126 break;
127 case PERF_RECORD_MISC_GUEST_KERNEL:
128 he->period_guest_sys += period;
129 break;
130 case PERF_RECORD_MISC_GUEST_USER:
131 he->period_guest_us += period;
132 break;
133 default:
134 break;
135 }
136 }
137
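/*
 * Exponential decay of an entry: each pass keeps 7/8 of the period and
 * event count, so entries that stop receiving samples shrink towards
 * zero and become candidates for removal in hists__decay_entry().
 */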
138 static void hist_entry__decay(struct hist_entry *he)
139 {
140 he->period = (he->period * 7) / 8;
141 he->nr_events = (he->nr_events * 7) / 8;
142 }
143
144 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
145 {
146 u64 prev_period = he->period;
147
148 if (prev_period == 0)
149 return true;
150
151 hist_entry__decay(he);
152
153 if (!he->filtered)
154 hists->stats.total_period -= prev_period - he->period;
155
156 return he->period == 0;
157 }
158
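/*
 * Walk the output tree and decay every entry; fully decayed entries (or
 * those whose user/kernel level the caller asked to zap) are erased from
 * the output tree (and, when collapsing is in use, from the collapsed
 * tree), unless the entry is still marked as used by a browser.
 */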
159 static void __hists__decay_entries(struct hists *hists, bool zap_user,
160 bool zap_kernel, bool threaded)
161 {
162 struct rb_node *next = rb_first(&hists->entries);
163 struct hist_entry *n;
164
165 while (next) {
166 n = rb_entry(next, struct hist_entry, rb_node);
167 next = rb_next(&n->rb_node);
168 /*
169 * We may be annotating this, for instance, so keep it here in
 170 * case it gets new samples; we'll eventually free it when
 171 * the user stops browsing and it again gets fully decayed.
172 */
173 if (((zap_user && n->level == '.') ||
174 (zap_kernel && n->level != '.') ||
175 hists__decay_entry(hists, n)) &&
176 !n->used) {
177 rb_erase(&n->rb_node, &hists->entries);
178
179 if (sort__need_collapse || threaded)
180 rb_erase(&n->rb_node_in, &hists->entries_collapsed);
181
182 hist_entry__free(n);
183 --hists->nr_entries;
184 }
185 }
186 }
187
188 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
189 {
190 return __hists__decay_entries(hists, zap_user, zap_kernel, false);
191 }
192
193 void hists__decay_entries_threaded(struct hists *hists,
194 bool zap_user, bool zap_kernel)
195 {
196 return __hists__decay_entries(hists, zap_user, zap_kernel, true);
197 }
198
199 /*
200 * histogram, sorted on item, collects periods
201 */
202
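/*
 * hist_entry__new() allocates the callchain root in the same chunk as
 * the entry itself (hence the malloc of sizeof(*he) + callchain_size);
 * this relies on he->callchain presumably being a zero-length array at
 * the end of struct hist_entry in hist.h.
 */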
203 static struct hist_entry *hist_entry__new(struct hist_entry *template)
204 {
205 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
206 struct hist_entry *he = malloc(sizeof(*he) + callchain_size);
207
208 if (he != NULL) {
209 *he = *template;
210 he->nr_events = 1;
211 if (he->ms.map)
212 he->ms.map->referenced = true;
213 if (symbol_conf.use_callchain)
214 callchain_init(he->callchain);
215 }
216
217 return he;
218 }
219
220 static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
221 {
222 if (!h->filtered) {
223 hists__calc_col_len(hists, h);
224 ++hists->nr_entries;
225 hists->stats.total_period += h->period;
226 }
227 }
228
229 static u8 symbol__parent_filter(const struct symbol *parent)
230 {
231 if (symbol_conf.exclude_other && parent == NULL)
232 return 1 << HIST_FILTER__PARENT;
233 return 0;
234 }
235
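/*
 * add_hist_entry() walks the current input tree (entries_in) with
 * hist_entry__cmp(); a matching entry just accumulates the period and
 * event count, otherwise a new node is linked in. hists->lock keeps
 * this consistent with hists__get_rotate_entries_in() swapping the
 * input tree underneath us.
 */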
236 static struct hist_entry *add_hist_entry(struct hists *hists,
237 struct hist_entry *entry,
238 struct addr_location *al,
239 u64 period)
240 {
241 struct rb_node **p;
242 struct rb_node *parent = NULL;
243 struct hist_entry *he;
244 int cmp;
245
246 pthread_mutex_lock(&hists->lock);
247
248 p = &hists->entries_in->rb_node;
249
250 while (*p != NULL) {
251 parent = *p;
252 he = rb_entry(parent, struct hist_entry, rb_node_in);
253
254 cmp = hist_entry__cmp(entry, he);
255
256 if (!cmp) {
257 he->period += period;
258 ++he->nr_events;
259
260 /* If the map of an existing hist_entry has
261 * become out-of-date due to an exec() or
262 * similar, update it. Otherwise we will
263 * mis-adjust symbol addresses when computing
264 * the history counter to increment.
265 */
266 if (he->ms.map != entry->ms.map) {
267 he->ms.map = entry->ms.map;
268 if (he->ms.map)
269 he->ms.map->referenced = true;
270 }
271 goto out;
272 }
273
274 if (cmp < 0)
275 p = &(*p)->rb_left;
276 else
277 p = &(*p)->rb_right;
278 }
279
280 he = hist_entry__new(entry);
281 if (!he)
282 goto out_unlock;
283
284 rb_link_node(&he->rb_node_in, parent, p);
285 rb_insert_color(&he->rb_node_in, hists->entries_in);
286 out:
287 hist_entry__add_cpumode_period(he, al->cpumode, period);
288 out_unlock:
289 pthread_mutex_unlock(&hists->lock);
290 return he;
291 }
292
293 struct hist_entry *__hists__add_branch_entry(struct hists *self,
294 struct addr_location *al,
295 struct symbol *sym_parent,
296 struct branch_info *bi,
297 u64 period)
298 {
299 struct hist_entry entry = {
300 .thread = al->thread,
301 .ms = {
302 .map = bi->to.map,
303 .sym = bi->to.sym,
304 },
305 .cpu = al->cpu,
306 .ip = bi->to.addr,
307 .level = al->level,
308 .period = period,
309 .parent = sym_parent,
310 .filtered = symbol__parent_filter(sym_parent),
311 .branch_info = bi,
312 };
313
314 return add_hist_entry(self, &entry, al, period);
315 }
316
317 struct hist_entry *__hists__add_entry(struct hists *self,
318 struct addr_location *al,
319 struct symbol *sym_parent, u64 period)
320 {
321 struct hist_entry entry = {
322 .thread = al->thread,
323 .ms = {
324 .map = al->map,
325 .sym = al->sym,
326 },
327 .cpu = al->cpu,
328 .ip = al->addr,
329 .level = al->level,
330 .period = period,
331 .parent = sym_parent,
332 .filtered = symbol__parent_filter(sym_parent),
333 };
334
335 return add_hist_entry(self, &entry, al, period);
336 }
337
338 int64_t
339 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
340 {
341 struct sort_entry *se;
342 int64_t cmp = 0;
343
344 list_for_each_entry(se, &hist_entry__sort_list, list) {
345 cmp = se->se_cmp(left, right);
346 if (cmp)
347 break;
348 }
349
350 return cmp;
351 }
352
353 int64_t
354 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
355 {
356 struct sort_entry *se;
357 int64_t cmp = 0;
358
359 list_for_each_entry(se, &hist_entry__sort_list, list) {
360 int64_t (*f)(struct hist_entry *, struct hist_entry *);
361
362 f = se->se_collapse ?: se->se_cmp;
363
364 cmp = f(left, right);
365 if (cmp)
366 break;
367 }
368
369 return cmp;
370 }
371
372 void hist_entry__free(struct hist_entry *he)
373 {
374 free(he);
375 }
376
377 /*
378 * collapse the histogram
379 */
380
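/*
 * Collapsing merges entries that compare equal under the collapse rules
 * (se_collapse when a sort entry provides one, otherwise se_cmp),
 * summing periods and event counts and merging callchains into the
 * surviving node.
 */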
381 static bool hists__collapse_insert_entry(struct hists *hists __used,
382 struct rb_root *root,
383 struct hist_entry *he)
384 {
385 struct rb_node **p = &root->rb_node;
386 struct rb_node *parent = NULL;
387 struct hist_entry *iter;
388 int64_t cmp;
389
390 while (*p != NULL) {
391 parent = *p;
392 iter = rb_entry(parent, struct hist_entry, rb_node_in);
393
394 cmp = hist_entry__collapse(iter, he);
395
396 if (!cmp) {
397 iter->period += he->period;
398 iter->nr_events += he->nr_events;
399 if (symbol_conf.use_callchain) {
400 callchain_cursor_reset(&callchain_cursor);
401 callchain_merge(&callchain_cursor,
402 iter->callchain,
403 he->callchain);
404 }
405 hist_entry__free(he);
406 return false;
407 }
408
409 if (cmp < 0)
410 p = &(*p)->rb_left;
411 else
412 p = &(*p)->rb_right;
413 }
414
415 rb_link_node(&he->rb_node_in, parent, p);
416 rb_insert_color(&he->rb_node_in, root);
417 return true;
418 }
419
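/*
 * entries_in points at one of two input trees; rotating it under the
 * lock lets the collapse pass drain the old tree while add_hist_entry()
 * keeps inserting into the fresh one, which is what the *_threaded
 * resort variants rely on.
 */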
420 static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
421 {
422 struct rb_root *root;
423
424 pthread_mutex_lock(&hists->lock);
425
426 root = hists->entries_in;
427 if (++hists->entries_in > &hists->entries_in_array[1])
428 hists->entries_in = &hists->entries_in_array[0];
429
430 pthread_mutex_unlock(&hists->lock);
431
432 return root;
433 }
434
435 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
436 {
437 hists__filter_entry_by_dso(hists, he);
438 hists__filter_entry_by_thread(hists, he);
439 hists__filter_entry_by_symbol(hists, he);
440 }
441
442 static void __hists__collapse_resort(struct hists *hists, bool threaded)
443 {
444 struct rb_root *root;
445 struct rb_node *next;
446 struct hist_entry *n;
447
448 if (!sort__need_collapse && !threaded)
449 return;
450
451 root = hists__get_rotate_entries_in(hists);
452 next = rb_first(root);
453
454 while (next) {
455 n = rb_entry(next, struct hist_entry, rb_node_in);
456 next = rb_next(&n->rb_node_in);
457
458 rb_erase(&n->rb_node_in, root);
459 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
460 /*
461 * If it wasn't combined with one of the entries already
462 * collapsed, we need to apply the filters that may have
463 * been set by, say, the hist_browser.
464 */
465 hists__apply_filters(hists, n);
466 }
467 }
468 }
469
470 void hists__collapse_resort(struct hists *hists)
471 {
472 return __hists__collapse_resort(hists, false);
473 }
474
475 void hists__collapse_resort_threaded(struct hists *hists)
476 {
477 return __hists__collapse_resort(hists, true);
478 }
479
480 /*
481 * reverse the map, sort on period.
482 */
483
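/*
 * The output pass rebuilds hists->entries ordered by descending period
 * (larger periods to the left, so they print first) and, when
 * callchains are in use, sorts each entry's chain with
 * callchain_param.sort() against the min_percent threshold.
 */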
484 static void __hists__insert_output_entry(struct rb_root *entries,
485 struct hist_entry *he,
486 u64 min_callchain_hits)
487 {
488 struct rb_node **p = &entries->rb_node;
489 struct rb_node *parent = NULL;
490 struct hist_entry *iter;
491
492 if (symbol_conf.use_callchain)
493 callchain_param.sort(&he->sorted_chain, he->callchain,
494 min_callchain_hits, &callchain_param);
495
496 while (*p != NULL) {
497 parent = *p;
498 iter = rb_entry(parent, struct hist_entry, rb_node);
499
500 if (he->period > iter->period)
501 p = &(*p)->rb_left;
502 else
503 p = &(*p)->rb_right;
504 }
505
506 rb_link_node(&he->rb_node, parent, p);
507 rb_insert_color(&he->rb_node, entries);
508 }
509
510 static void __hists__output_resort(struct hists *hists, bool threaded)
511 {
512 struct rb_root *root;
513 struct rb_node *next;
514 struct hist_entry *n;
515 u64 min_callchain_hits;
516
517 min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
518
519 if (sort__need_collapse || threaded)
520 root = &hists->entries_collapsed;
521 else
522 root = hists->entries_in;
523
524 next = rb_first(root);
525 hists->entries = RB_ROOT;
526
527 hists->nr_entries = 0;
528 hists->stats.total_period = 0;
529 hists__reset_col_len(hists);
530
531 while (next) {
532 n = rb_entry(next, struct hist_entry, rb_node_in);
533 next = rb_next(&n->rb_node_in);
534
535 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
536 hists__inc_nr_entries(hists, n);
537 }
538 }
539
540 void hists__output_resort(struct hists *hists)
541 {
542 return __hists__output_resort(hists, false);
543 }
544
545 void hists__output_resort_threaded(struct hists *hists)
546 {
547 return __hists__output_resort(hists, true);
548 }
549
550 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
551 {
552 int i;
553 int ret = fprintf(fp, " ");
554
555 for (i = 0; i < left_margin; i++)
556 ret += fprintf(fp, " ");
557
558 return ret;
559 }
560
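/*
 * Graph printing helpers: depth_mask carries one bit per depth level
 * and decides whether a '|' connector is still drawn in that column,
 * keeping sibling branches visually linked until the last child of a
 * level has been printed.
 */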
561 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
562 int left_margin)
563 {
564 int i;
565 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
566
567 for (i = 0; i < depth; i++)
568 if (depth_mask & (1 << i))
569 ret += fprintf(fp, "| ");
570 else
571 ret += fprintf(fp, " ");
572
573 ret += fprintf(fp, "\n");
574
575 return ret;
576 }
577
578 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
579 int depth, int depth_mask, int period,
580 u64 total_samples, u64 hits,
581 int left_margin)
582 {
583 int i;
584 size_t ret = 0;
585
586 ret += callchain__fprintf_left_margin(fp, left_margin);
587 for (i = 0; i < depth; i++) {
588 if (depth_mask & (1 << i))
589 ret += fprintf(fp, "|");
590 else
591 ret += fprintf(fp, " ");
592 if (!period && i == depth - 1) {
593 double percent;
594
595 percent = hits * 100.0 / total_samples;
596 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
597 } else
598 ret += fprintf(fp, "%s", " ");
599 }
600 if (chain->ms.sym)
601 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
602 else
603 ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
604
605 return ret;
606 }
607
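/*
 * rem_hits is a synthetic "[...]" node used when relative graph mode
 * has remaining, filtered-out hits that still need to be accounted for.
 */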
608 static struct symbol *rem_sq_bracket;
609 static struct callchain_list rem_hits;
610
611 static void init_rem_hits(void)
612 {
613 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
614 if (!rem_sq_bracket) {
615 fprintf(stderr, "Not enough memory to display remaining hits\n");
616 return;
617 }
618
619 strcpy(rem_sq_bracket->name, "[...]");
620 rem_hits.ms.sym = rem_sq_bracket;
621 }
622
623 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
624 u64 total_samples, int depth,
625 int depth_mask, int left_margin)
626 {
627 struct rb_node *node, *next;
628 struct callchain_node *child;
629 struct callchain_list *chain;
630 int new_depth_mask = depth_mask;
631 u64 remaining;
632 size_t ret = 0;
633 int i;
634 uint entries_printed = 0;
635
636 remaining = total_samples;
637
638 node = rb_first(root);
639 while (node) {
640 u64 new_total;
641 u64 cumul;
642
643 child = rb_entry(node, struct callchain_node, rb_node);
644 cumul = callchain_cumul_hits(child);
645 remaining -= cumul;
646
647 /*
648 * The depth mask manages the output of pipes that show
649 * the depth. We don't want to keep the pipes of the current
650 * level for the last child of this depth.
 651 * Except when we have remaining filtered hits: they will
 652 * supersede the last child.
653 */
654 next = rb_next(node);
655 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
656 new_depth_mask &= ~(1 << (depth - 1));
657
658 /*
659 * But we keep the older depth mask for the line separator
 660 * to keep the level link until we reach the last child.
661 */
662 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
663 left_margin);
664 i = 0;
665 list_for_each_entry(chain, &child->val, list) {
666 ret += ipchain__fprintf_graph(fp, chain, depth,
667 new_depth_mask, i++,
668 total_samples,
669 cumul,
670 left_margin);
671 }
672
673 if (callchain_param.mode == CHAIN_GRAPH_REL)
674 new_total = child->children_hit;
675 else
676 new_total = total_samples;
677
678 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
679 depth + 1,
680 new_depth_mask | (1 << depth),
681 left_margin);
682 node = next;
683 if (++entries_printed == callchain_param.print_limit)
684 break;
685 }
686
687 if (callchain_param.mode == CHAIN_GRAPH_REL &&
688 remaining && remaining != total_samples) {
689
690 if (!rem_sq_bracket)
691 return ret;
692
693 new_depth_mask &= ~(1 << (depth - 1));
694 ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
695 new_depth_mask, 0, total_samples,
696 remaining, left_margin);
697 }
698
699 return ret;
700 }
701
702 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
703 u64 total_samples, int left_margin)
704 {
705 struct callchain_node *cnode;
706 struct callchain_list *chain;
707 u32 entries_printed = 0;
708 bool printed = false;
709 struct rb_node *node;
710 int i = 0;
711 int ret = 0;
712
713 /*
 714 * If we have a single callchain root, don't bother printing
 715 * its percentage (100% in fractal mode, and the same percentage
 716 * as the hist entry in graph mode). This also avoids one level of columns.
717 */
718 node = rb_first(root);
719 if (node && !rb_next(node)) {
720 cnode = rb_entry(node, struct callchain_node, rb_node);
721 list_for_each_entry(chain, &cnode->val, list) {
722 /*
 723 * If we sort by symbol, the first entry is the same as
 724 * the symbol. No need to print it, otherwise it appears
 725 * displayed twice.
726 */
727 if (!i++ && sort__first_dimension == SORT_SYM)
728 continue;
729 if (!printed) {
730 ret += callchain__fprintf_left_margin(fp, left_margin);
731 ret += fprintf(fp, "|\n");
732 ret += callchain__fprintf_left_margin(fp, left_margin);
733 ret += fprintf(fp, "---");
734 left_margin += 3;
735 printed = true;
736 } else
737 ret += callchain__fprintf_left_margin(fp, left_margin);
738
739 if (chain->ms.sym)
740 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
741 else
742 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
743
744 if (++entries_printed == callchain_param.print_limit)
745 break;
746 }
747 root = &cnode->rb_root;
748 }
749
750 ret += __callchain__fprintf_graph(fp, root, total_samples,
751 1, 1, left_margin);
752 ret += fprintf(fp, "\n");
753
754 return ret;
755 }
756
757 static size_t __callchain__fprintf_flat(FILE *fp,
758 struct callchain_node *self,
759 u64 total_samples)
760 {
761 struct callchain_list *chain;
762 size_t ret = 0;
763
764 if (!self)
765 return 0;
766
767 ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
768
769
770 list_for_each_entry(chain, &self->val, list) {
771 if (chain->ip >= PERF_CONTEXT_MAX)
772 continue;
773 if (chain->ms.sym)
774 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
775 else
776 ret += fprintf(fp, " %p\n",
777 (void *)(long)chain->ip);
778 }
779
780 return ret;
781 }
782
783 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
784 u64 total_samples)
785 {
786 size_t ret = 0;
787 u32 entries_printed = 0;
788 struct rb_node *rb_node;
789 struct callchain_node *chain;
790
791 rb_node = rb_first(self);
792 while (rb_node) {
793 double percent;
794
795 chain = rb_entry(rb_node, struct callchain_node, rb_node);
796 percent = chain->hit * 100.0 / total_samples;
797
798 ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
799 ret += __callchain__fprintf_flat(fp, chain, total_samples);
800 ret += fprintf(fp, "\n");
801 if (++entries_printed == callchain_param.print_limit)
802 break;
803
804 rb_node = rb_next(rb_node);
805 }
806
807 return ret;
808 }
809
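/*
 * Note: in CHAIN_GRAPH_REL mode the entry's own period is the 100%
 * reference for the graph, while the absolute and flat modes use the
 * event's total_samples.
 */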
810 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
811 u64 total_samples, int left_margin,
812 FILE *fp)
813 {
814 switch (callchain_param.mode) {
815 case CHAIN_GRAPH_REL:
816 return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
817 left_margin);
818 break;
819 case CHAIN_GRAPH_ABS:
820 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
821 left_margin);
822 break;
823 case CHAIN_FLAT:
824 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
825 break;
826 case CHAIN_NONE:
827 break;
828 default:
829 pr_err("Bad callchain mode\n");
830 }
831
832 return 0;
833 }
834
835 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
836 {
837 struct rb_node *next = rb_first(&hists->entries);
838 struct hist_entry *n;
839 int row = 0;
840
841 hists__reset_col_len(hists);
842
843 while (next && row++ < max_rows) {
844 n = rb_entry(next, struct hist_entry, rb_node);
845 if (!n->filtered)
846 hists__calc_col_len(hists, n);
847 next = rb_next(&n->rb_node);
848 }
849 }
850
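/*
 * When pair_hists is set, the percentage comes from the paired
 * (baseline) entry and a delta between the two percentages is appended;
 * presumably this is the path behind the Baseline/Delta columns printed
 * by hists__fprintf() (perf diff style output).
 */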
851 static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
852 size_t size, struct hists *pair_hists,
853 bool show_displacement, long displacement,
854 bool color, u64 total_period)
855 {
856 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
857 u64 nr_events;
858 const char *sep = symbol_conf.field_sep;
859 int ret;
860
861 if (symbol_conf.exclude_other && !he->parent)
862 return 0;
863
864 if (pair_hists) {
865 period = he->pair ? he->pair->period : 0;
866 nr_events = he->pair ? he->pair->nr_events : 0;
867 total = pair_hists->stats.total_period;
868 period_sys = he->pair ? he->pair->period_sys : 0;
869 period_us = he->pair ? he->pair->period_us : 0;
870 period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
871 period_guest_us = he->pair ? he->pair->period_guest_us : 0;
872 } else {
873 period = he->period;
874 nr_events = he->nr_events;
875 total = total_period;
876 period_sys = he->period_sys;
877 period_us = he->period_us;
878 period_guest_sys = he->period_guest_sys;
879 period_guest_us = he->period_guest_us;
880 }
881
882 if (total) {
883 if (color)
884 ret = percent_color_snprintf(s, size,
885 sep ? "%.2f" : " %6.2f%%",
886 (period * 100.0) / total);
887 else
888 ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
889 (period * 100.0) / total);
890 if (symbol_conf.show_cpu_utilization) {
891 ret += percent_color_snprintf(s + ret, size - ret,
892 sep ? "%.2f" : " %6.2f%%",
893 (period_sys * 100.0) / total);
894 ret += percent_color_snprintf(s + ret, size - ret,
895 sep ? "%.2f" : " %6.2f%%",
896 (period_us * 100.0) / total);
897 if (perf_guest) {
898 ret += percent_color_snprintf(s + ret,
899 size - ret,
900 sep ? "%.2f" : " %6.2f%%",
901 (period_guest_sys * 100.0) /
902 total);
903 ret += percent_color_snprintf(s + ret,
904 size - ret,
905 sep ? "%.2f" : " %6.2f%%",
906 (period_guest_us * 100.0) /
907 total);
908 }
909 }
910 } else
911 ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
912
913 if (symbol_conf.show_nr_samples) {
914 if (sep)
915 ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
916 else
917 ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
918 }
919
920 if (symbol_conf.show_total_period) {
921 if (sep)
922 ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
923 else
924 ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
925 }
926
927 if (pair_hists) {
928 char bf[32];
929 double old_percent = 0, new_percent = 0, diff;
930
931 if (total > 0)
932 old_percent = (period * 100.0) / total;
933 if (total_period > 0)
934 new_percent = (he->period * 100.0) / total_period;
935
936 diff = new_percent - old_percent;
937
938 if (fabs(diff) >= 0.01)
939 scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
940 else
941 scnprintf(bf, sizeof(bf), " ");
942
943 if (sep)
944 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
945 else
946 ret += scnprintf(s + ret, size - ret, "%11.11s", bf);
947
948 if (show_displacement) {
949 if (displacement)
950 scnprintf(bf, sizeof(bf), "%+4ld", displacement);
951 else
952 scnprintf(bf, sizeof(bf), " ");
953
954 if (sep)
955 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
956 else
957 ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
958 }
959 }
960
961 return ret;
962 }
963
964 int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
965 struct hists *hists)
966 {
967 const char *sep = symbol_conf.field_sep;
968 struct sort_entry *se;
969 int ret = 0;
970
971 list_for_each_entry(se, &hist_entry__sort_list, list) {
972 if (se->elide)
973 continue;
974
975 ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
976 ret += se->se_snprintf(he, s + ret, size - ret,
977 hists__col_len(hists, se->se_width_idx));
978 }
979
980 return ret;
981 }
982
983 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
984 struct hists *hists, struct hists *pair_hists,
985 bool show_displacement, long displacement,
986 u64 total_period, FILE *fp)
987 {
988 char bf[512];
989 int ret;
990
991 if (size == 0 || size > sizeof(bf))
992 size = sizeof(bf);
993
994 ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
995 show_displacement, displacement,
996 true, total_period);
997 hist_entry__snprintf(he, bf + ret, size - ret, hists);
998 return fprintf(fp, "%s\n", bf);
999 }
1000
1001 static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
1002 struct hists *hists,
1003 u64 total_period, FILE *fp)
1004 {
1005 int left_margin = 0;
1006
1007 if (sort__first_dimension == SORT_COMM) {
1008 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
1009 typeof(*se), list);
1010 left_margin = hists__col_len(hists, se->se_width_idx);
1011 left_margin -= thread__comm_len(he->thread);
1012 }
1013
1014 return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
1015 }
1016
1017 size_t hists__fprintf(struct hists *hists, struct hists *pair,
1018 bool show_displacement, bool show_header, int max_rows,
1019 int max_cols, FILE *fp)
1020 {
1021 struct sort_entry *se;
1022 struct rb_node *nd;
1023 size_t ret = 0;
1024 u64 total_period;
1025 unsigned long position = 1;
1026 long displacement = 0;
1027 unsigned int width;
1028 const char *sep = symbol_conf.field_sep;
1029 const char *col_width = symbol_conf.col_width_list_str;
1030 int nr_rows = 0;
1031
1032 init_rem_hits();
1033
1034 if (!show_header)
1035 goto print_entries;
1036
1037 fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
1038
1039 if (symbol_conf.show_cpu_utilization) {
1040 if (sep) {
1041 ret += fprintf(fp, "%csys", *sep);
1042 ret += fprintf(fp, "%cus", *sep);
1043 if (perf_guest) {
1044 ret += fprintf(fp, "%cguest sys", *sep);
1045 ret += fprintf(fp, "%cguest us", *sep);
1046 }
1047 } else {
1048 ret += fprintf(fp, " sys ");
1049 ret += fprintf(fp, " us ");
1050 if (perf_guest) {
1051 ret += fprintf(fp, " guest sys ");
1052 ret += fprintf(fp, " guest us ");
1053 }
1054 }
1055 }
1056
1057 if (symbol_conf.show_nr_samples) {
1058 if (sep)
1059 fprintf(fp, "%cSamples", *sep);
1060 else
1061 fputs(" Samples ", fp);
1062 }
1063
1064 if (symbol_conf.show_total_period) {
1065 if (sep)
1066 ret += fprintf(fp, "%cPeriod", *sep);
1067 else
1068 ret += fprintf(fp, " Period ");
1069 }
1070
1071 if (pair) {
1072 if (sep)
1073 ret += fprintf(fp, "%cDelta", *sep);
1074 else
1075 ret += fprintf(fp, " Delta ");
1076
1077 if (show_displacement) {
1078 if (sep)
1079 ret += fprintf(fp, "%cDisplacement", *sep);
1080 else
1081 ret += fprintf(fp, " Displ");
1082 }
1083 }
1084
1085 list_for_each_entry(se, &hist_entry__sort_list, list) {
1086 if (se->elide)
1087 continue;
1088 if (sep) {
1089 fprintf(fp, "%c%s", *sep, se->se_header);
1090 continue;
1091 }
1092 width = strlen(se->se_header);
1093 if (symbol_conf.col_width_list_str) {
1094 if (col_width) {
1095 hists__set_col_len(hists, se->se_width_idx,
1096 atoi(col_width));
1097 col_width = strchr(col_width, ',');
1098 if (col_width)
1099 ++col_width;
1100 }
1101 }
1102 if (!hists__new_col_len(hists, se->se_width_idx, width))
1103 width = hists__col_len(hists, se->se_width_idx);
1104 fprintf(fp, " %*s", width, se->se_header);
1105 }
1106
1107 fprintf(fp, "\n");
1108 if (max_rows && ++nr_rows >= max_rows)
1109 goto out;
1110
1111 if (sep)
1112 goto print_entries;
1113
1114 fprintf(fp, "# ........");
1115 if (symbol_conf.show_cpu_utilization)
1116 fprintf(fp, " ....... .......");
1117 if (symbol_conf.show_nr_samples)
1118 fprintf(fp, " ..........");
1119 if (symbol_conf.show_total_period)
1120 fprintf(fp, " ............");
1121 if (pair) {
1122 fprintf(fp, " ..........");
1123 if (show_displacement)
1124 fprintf(fp, " .....");
1125 }
1126 list_for_each_entry(se, &hist_entry__sort_list, list) {
1127 unsigned int i;
1128
1129 if (se->elide)
1130 continue;
1131
1132 fprintf(fp, " ");
1133 width = hists__col_len(hists, se->se_width_idx);
1134 if (width == 0)
1135 width = strlen(se->se_header);
1136 for (i = 0; i < width; i++)
1137 fprintf(fp, ".");
1138 }
1139
1140 fprintf(fp, "\n");
1141 if (max_rows && ++nr_rows >= max_rows)
1142 goto out;
1143
1144 fprintf(fp, "#\n");
1145 if (max_rows && ++nr_rows >= max_rows)
1146 goto out;
1147
1148 print_entries:
1149 total_period = hists->stats.total_period;
1150
1151 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1152 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1153
1154 if (h->filtered)
1155 continue;
1156
1157 if (show_displacement) {
1158 if (h->pair != NULL)
1159 displacement = ((long)h->pair->position -
1160 (long)position);
1161 else
1162 displacement = 0;
1163 ++position;
1164 }
1165 ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
1166 displacement, total_period, fp);
1167
1168 if (symbol_conf.use_callchain)
1169 ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
1170 if (max_rows && ++nr_rows >= max_rows)
1171 goto out;
1172
1173 if (h->ms.map == NULL && verbose > 1) {
1174 __map_groups__fprintf_maps(&h->thread->mg,
1175 MAP__FUNCTION, verbose, fp);
1176 fprintf(fp, "%.10s end\n", graph_dotted_line);
1177 }
1178 }
1179 out:
1180 free(rem_sq_bracket);
1181
1182 return ret;
1183 }
1184
1185 /*
1186 * See hists__fprintf to match the column widths
1187 */
1188 unsigned int hists__sort_list_width(struct hists *hists)
1189 {
1190 struct sort_entry *se;
1191 int ret = 9; /* total % */
1192
1193 if (symbol_conf.show_cpu_utilization) {
1194 ret += 7; /* count_sys % */
1195 ret += 6; /* count_us % */
1196 if (perf_guest) {
1197 ret += 13; /* count_guest_sys % */
1198 ret += 12; /* count_guest_us % */
1199 }
1200 }
1201
1202 if (symbol_conf.show_nr_samples)
1203 ret += 11;
1204
1205 if (symbol_conf.show_total_period)
1206 ret += 13;
1207
1208 list_for_each_entry(se, &hist_entry__sort_list, list)
1209 if (!se->elide)
1210 ret += 2 + hists__col_len(hists, se->se_width_idx);
1211
1212 if (verbose) /* Addr + origin */
1213 ret += 3 + BITS_PER_LONG / 4;
1214
1215 return ret;
1216 }
1217
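/*
 * he->filtered is a bitmask with one bit per enum hist_filter; an entry
 * is added back to the visible totals only once every filter bit has
 * been cleared.
 */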
1218 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1219 enum hist_filter filter)
1220 {
1221 h->filtered &= ~(1 << filter);
1222 if (h->filtered)
1223 return;
1224
1225 ++hists->nr_entries;
1226 if (h->ms.unfolded)
1227 hists->nr_entries += h->nr_rows;
1228 h->row_offset = 0;
1229 hists->stats.total_period += h->period;
1230 hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
1231
1232 hists__calc_col_len(hists, h);
1233 }
1234
1235
1236 static bool hists__filter_entry_by_dso(struct hists *hists,
1237 struct hist_entry *he)
1238 {
1239 if (hists->dso_filter != NULL &&
1240 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1241 he->filtered |= (1 << HIST_FILTER__DSO);
1242 return true;
1243 }
1244
1245 return false;
1246 }
1247
1248 void hists__filter_by_dso(struct hists *hists)
1249 {
1250 struct rb_node *nd;
1251
1252 hists->nr_entries = hists->stats.total_period = 0;
1253 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
1254 hists__reset_col_len(hists);
1255
1256 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1257 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1258
1259 if (symbol_conf.exclude_other && !h->parent)
1260 continue;
1261
1262 if (hists__filter_entry_by_dso(hists, h))
1263 continue;
1264
1265 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
1266 }
1267 }
1268
1269 static bool hists__filter_entry_by_thread(struct hists *hists,
1270 struct hist_entry *he)
1271 {
1272 if (hists->thread_filter != NULL &&
1273 he->thread != hists->thread_filter) {
1274 he->filtered |= (1 << HIST_FILTER__THREAD);
1275 return true;
1276 }
1277
1278 return false;
1279 }
1280
1281 void hists__filter_by_thread(struct hists *hists)
1282 {
1283 struct rb_node *nd;
1284
1285 hists->nr_entries = hists->stats.total_period = 0;
1286 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
1287 hists__reset_col_len(hists);
1288
1289 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1290 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1291
1292 if (hists__filter_entry_by_thread(hists, h))
1293 continue;
1294
1295 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
1296 }
1297 }
1298
1299 static bool hists__filter_entry_by_symbol(struct hists *hists,
1300 struct hist_entry *he)
1301 {
1302 if (hists->symbol_filter_str != NULL &&
1303 (!he->ms.sym || strstr(he->ms.sym->name,
1304 hists->symbol_filter_str) == NULL)) {
1305 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1306 return true;
1307 }
1308
1309 return false;
1310 }
1311
1312 void hists__filter_by_symbol(struct hists *hists)
1313 {
1314 struct rb_node *nd;
1315
1316 hists->nr_entries = hists->stats.total_period = 0;
1317 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
1318 hists__reset_col_len(hists);
1319
1320 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1321 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1322
1323 if (hists__filter_entry_by_symbol(hists, h))
1324 continue;
1325
1326 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
1327 }
1328 }
1329
1330 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
1331 {
1332 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
1333 }
1334
1335 int hist_entry__annotate(struct hist_entry *he, size_t privsize)
1336 {
1337 return symbol__annotate(he->ms.sym, he->ms.map, privsize);
1338 }
1339
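/*
 * Slot 0 of nr_events appears to double as a grand total, since it is
 * bumped for every event type in addition to the per-type counter.
 */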
1340 void hists__inc_nr_events(struct hists *hists, u32 type)
1341 {
1342 ++hists->stats.nr_events[0];
1343 ++hists->stats.nr_events[type];
1344 }
1345
1346 size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
1347 {
1348 int i;
1349 size_t ret = 0;
1350
1351 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1352 const char *name;
1353
1354 if (hists->stats.nr_events[i] == 0)
1355 continue;
1356
1357 name = perf_event__name(i);
1358 if (!strcmp(name, "UNKNOWN"))
1359 continue;
1360
1361 ret += fprintf(fp, "%16s events: %10d\n", name,
1362 hists->stats.nr_events[i]);
1363 }
1364
1365 return ret;
1366 }