6 struct callchain_param callchain_param
= {
7 .mode
= CHAIN_GRAPH_REL
,
11 static void hist_entry__add_cpumode_count(struct hist_entry
*self
,
12 unsigned int cpumode
, u64 count
)
15 case PERF_RECORD_MISC_KERNEL
:
16 self
->count_sys
+= count
;
18 case PERF_RECORD_MISC_USER
:
19 self
->count_us
+= count
;
21 case PERF_RECORD_MISC_GUEST_KERNEL
:
22 self
->count_guest_sys
+= count
;
24 case PERF_RECORD_MISC_GUEST_USER
:
25 self
->count_guest_us
+= count
;
33 * histogram, sorted on item, collects counts
36 static struct hist_entry
*hist_entry__new(struct hist_entry
*template)
38 size_t callchain_size
= symbol_conf
.use_callchain
? sizeof(struct callchain_node
) : 0;
39 struct hist_entry
*self
= malloc(sizeof(*self
) + callchain_size
);
43 if (symbol_conf
.use_callchain
)
44 callchain_init(self
->callchain
);
50 static void hists__inc_nr_entries(struct hists
*self
, struct hist_entry
*entry
)
52 if (entry
->ms
.sym
&& self
->max_sym_namelen
< entry
->ms
.sym
->namelen
)
53 self
->max_sym_namelen
= entry
->ms
.sym
->namelen
;
/*
 * __hists__add_entry(): find-or-create the hist_entry matching @al in
 * the @self->entries rbtree and add @count to it, then attribute the
 * count to the sample's cpumode.
 * NOTE(review): this chunk is missing original lines (the designated
 * initializer fields of 'entry', the rbtree-walk loop and the cmp
 * branch bodies).  Code kept byte-identical; only comments added.
 */
57 struct hist_entry
*__hists__add_entry(struct hists
*self
,
58 struct addr_location
*al
,
59 struct symbol
*sym_parent
, u64 count
)
61 struct rb_node
**p
= &self
->entries
.rb_node
;
62 struct rb_node
*parent
= NULL
;
63 struct hist_entry
*he
;
/* stack template entry; initializer fields not visible in this chunk */
64 struct hist_entry entry
= {
/* rbtree walk: compare template against each existing entry */
79 he
= rb_entry(parent
, struct hist_entry
, rb_node
);
81 cmp
= hist_entry__cmp(&entry
, he
);
/* no match found: allocate a fresh entry and insert it */
94 he
= hist_entry__new(&entry
);
97 rb_link_node(&he
->rb_node
, parent
, p
);
98 rb_insert_color(&he
->rb_node
, &self
->entries
);
99 hists__inc_nr_entries(self
, he
);
/* per-cpumode accounting happens for both new and existing entries */
101 hist_entry__add_cpumode_count(he
, al
->cpumode
, count
);
106 hist_entry__cmp(struct hist_entry
*left
, struct hist_entry
*right
)
108 struct sort_entry
*se
;
111 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
112 cmp
= se
->se_cmp(left
, right
);
121 hist_entry__collapse(struct hist_entry
*left
, struct hist_entry
*right
)
123 struct sort_entry
*se
;
126 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
127 int64_t (*f
)(struct hist_entry
*, struct hist_entry
*);
129 f
= se
->se_collapse
?: se
->se_cmp
;
131 cmp
= f(left
, right
);
/*
 * Release a hist_entry.  The entry and its inline callchain_node (when
 * present -- see hist_entry__new()) are a single allocation, so one
 * free() suffices.
 */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
145 * collapse the histogram
148 static bool collapse__insert_entry(struct rb_root
*root
, struct hist_entry
*he
)
150 struct rb_node
**p
= &root
->rb_node
;
151 struct rb_node
*parent
= NULL
;
152 struct hist_entry
*iter
;
157 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
159 cmp
= hist_entry__collapse(iter
, he
);
162 iter
->count
+= he
->count
;
163 hist_entry__free(he
);
173 rb_link_node(&he
->rb_node
, parent
, p
);
174 rb_insert_color(&he
->rb_node
, root
);
178 void hists__collapse_resort(struct hists
*self
)
181 struct rb_node
*next
;
182 struct hist_entry
*n
;
184 if (!sort__need_collapse
)
188 next
= rb_first(&self
->entries
);
189 self
->nr_entries
= 0;
190 self
->max_sym_namelen
= 0;
193 n
= rb_entry(next
, struct hist_entry
, rb_node
);
194 next
= rb_next(&n
->rb_node
);
196 rb_erase(&n
->rb_node
, &self
->entries
);
197 if (collapse__insert_entry(&tmp
, n
))
198 hists__inc_nr_entries(self
, n
);
205 * reverse the map, sort on count.
208 static void __hists__insert_output_entry(struct rb_root
*entries
,
209 struct hist_entry
*he
,
210 u64 min_callchain_hits
)
212 struct rb_node
**p
= &entries
->rb_node
;
213 struct rb_node
*parent
= NULL
;
214 struct hist_entry
*iter
;
216 if (symbol_conf
.use_callchain
)
217 callchain_param
.sort(&he
->sorted_chain
, he
->callchain
,
218 min_callchain_hits
, &callchain_param
);
222 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
224 if (he
->count
> iter
->count
)
230 rb_link_node(&he
->rb_node
, parent
, p
);
231 rb_insert_color(&he
->rb_node
, entries
);
234 void hists__output_resort(struct hists
*self
)
237 struct rb_node
*next
;
238 struct hist_entry
*n
;
239 u64 min_callchain_hits
;
241 min_callchain_hits
= self
->stats
.total
* (callchain_param
.min_percent
/ 100);
244 next
= rb_first(&self
->entries
);
246 self
->nr_entries
= 0;
247 self
->max_sym_namelen
= 0;
250 n
= rb_entry(next
, struct hist_entry
, rb_node
);
251 next
= rb_next(&n
->rb_node
);
253 rb_erase(&n
->rb_node
, &self
->entries
);
254 __hists__insert_output_entry(&tmp
, n
, min_callchain_hits
);
255 hists__inc_nr_entries(self
, n
);
/*
 * Print the blank left margin preceding a callchain line: a fixed-width
 * leading string plus @left_margin extra spaces.  Returns chars written.
 * NOTE(review): the width of the leading string literal was collapsed
 * by whitespace mangling -- kept byte-identical, confirm against the
 * original file.
 */
261 static size_t callchain__fprintf_left_margin(FILE *fp
, int left_margin
)
264 int ret
= fprintf(fp
, " ");
266 for (i
= 0; i
< left_margin
; i
++)
267 ret
+= fprintf(fp
, " ");
/*
 * Print one separator line of the callchain graph: "|" pipes at each
 * depth level still set in @depth_mask, then a newline.
 * NOTE(review): chunk is missing the trailing parameter(s) of the
 * signature and the string literals had their spacing collapsed --
 * kept byte-identical, only comments added.
 */
272 static size_t ipchain__fprintf_graph_line(FILE *fp
, int depth
, int depth_mask
,
276 size_t ret
= callchain__fprintf_left_margin(fp
, left_margin
);
/* one column per depth level; pipe only where the mask bit is set */
278 for (i
= 0; i
< depth
; i
++)
279 if (depth_mask
& (1 << i
))
280 ret
+= fprintf(fp
, "| ");
282 ret
+= fprintf(fp
, " ");
284 ret
+= fprintf(fp
, "\n");
/*
 * Print one callchain graph entry line: depth pipes, then either a
 * colored "--NN.NN%--" branch header (first line of a new branch) or
 * plain padding, then the symbol name (or raw ip when unresolved).
 * NOTE(review): chunk is missing signature tail, loop braces and the
 * if/else skeleton around the two fprintf tails -- kept byte-identical,
 * only comments added.
 */
289 static size_t ipchain__fprintf_graph(FILE *fp
, struct callchain_list
*chain
,
290 int depth
, int depth_mask
, int count
,
291 u64 total_samples
, int hits
,
297 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
298 for (i
= 0; i
< depth
; i
++) {
299 if (depth_mask
& (1 << i
))
300 ret
+= fprintf(fp
, "|");
302 ret
+= fprintf(fp
, " ");
/* first entry of the deepest level gets the percentage header */
303 if (!count
&& i
== depth
- 1) {
306 percent
= hits
* 100.0 / total_samples
;
307 ret
+= percent_color_fprintf(fp
, "--%2.2f%%-- ", percent
);
309 ret
+= fprintf(fp
, "%s", " ");
/* resolved symbol name, else the raw instruction pointer */
312 ret
+= fprintf(fp
, "%s\n", chain
->ms
.sym
->name
);
314 ret
+= fprintf(fp
, "%p\n", (void *)(long)chain
->ip
);
/* Fake "[...]" symbol and callchain entry used by init_rem_hits() /
 * __callchain__fprintf_graph() to render hits filtered out below the
 * minimum percentage. */
319 static struct symbol
*rem_sq_bracket
;
320 static struct callchain_list rem_hits
;
/*
 * Allocate and initialize the shared "[...]" placeholder symbol used
 * for remaining (filtered) callchain hits.  The +6 covers the "[...]"
 * string plus NUL in the flexible name field.
 * NOTE(review): the early-return after the OOM message and any namelen
 * setup are missing from this chunk -- kept byte-identical.
 */
322 static void init_rem_hits(void)
324 rem_sq_bracket
= malloc(sizeof(*rem_sq_bracket
) + 6);
325 if (!rem_sq_bracket
) {
326 fprintf(stderr
, "Not enough memory to display remaining hits\n");
330 strcpy(rem_sq_bracket
->name
, "[...]");
331 rem_hits
.ms
.sym
= rem_sq_bracket
;
/*
 * Recursively print the callchain graph rooted at @self: for each
 * child, a separator line, its chain entries, then recurse one level
 * deeper; finally render a "[...]" entry for hits pruned below the
 * threshold (relative mode only).  Honors callchain_param.print_limit.
 * NOTE(review): chunk is missing the loop skeleton, several argument
 * tails, 'ret'/'new_total'/'remaining'/'cumul' declarations and the
 * return -- kept byte-identical, only comments added.
 */
334 static size_t __callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
335 u64 total_samples
, int depth
,
336 int depth_mask
, int left_margin
)
338 struct rb_node
*node
, *next
;
339 struct callchain_node
*child
;
340 struct callchain_list
*chain
;
341 int new_depth_mask
= depth_mask
;
346 uint entries_printed
= 0;
/* relative mode: percentages against this node's children total */
348 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
349 new_total
= self
->children_hit
;
351 new_total
= total_samples
;
353 remaining
= new_total
;
355 node
= rb_first(&self
->rb_root
);
359 child
= rb_entry(node
, struct callchain_node
, rb_node
);
360 cumul
= cumul_hits(child
);
364 * The depth mask manages the output of pipes that show
365 * the depth. We don't want to keep the pipes of the current
366 * level for the last child of this depth.
367 * Except if we have remaining filtered hits. They will
368 * supersede the last child
370 next
= rb_next(node
);
371 if (!next
&& (callchain_param
.mode
!= CHAIN_GRAPH_REL
|| !remaining
))
372 new_depth_mask
&= ~(1 << (depth
- 1));
375 * But we keep the older depth mask for the line separator
376 * to keep the level link until we reach the last child
378 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
,
381 list_for_each_entry(chain
, &child
->val
, list
) {
382 ret
+= ipchain__fprintf_graph(fp
, chain
, depth
,
/* recurse one level deeper with this depth's bit set */
388 ret
+= __callchain__fprintf_graph(fp
, child
, new_total
,
390 new_depth_mask
| (1 << depth
),
393 if (++entries_printed
== callchain_param
.print_limit
)
/* relative mode: render pruned hits as a "[...]" entry */
397 if (callchain_param
.mode
== CHAIN_GRAPH_REL
&&
398 remaining
&& remaining
!= new_total
) {
403 new_depth_mask
&= ~(1 << (depth
- 1));
405 ret
+= ipchain__fprintf_graph(fp
, &rem_hits
, depth
,
406 new_depth_mask
, 0, new_total
,
407 remaining
, left_margin
);
/*
 * Print the top of a callchain graph: the root node's own chain
 * entries (with "|" / "---" connectors), then the recursive child
 * rendering via __callchain__fprintf_graph().  Honors print_limit.
 * NOTE(review): chunk is missing 'i'/'ret' declarations, if/else
 * skeletons and the return -- kept byte-identical, only comments added.
 */
413 static size_t callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
414 u64 total_samples
, int left_margin
)
416 struct callchain_list
*chain
;
417 bool printed
= false;
420 u32 entries_printed
= 0;
422 list_for_each_entry(chain
, &self
->val
, list
) {
/* when sorting by symbol, the first entry is the entry line itself */
423 if (!i
++ && sort__first_dimension
== SORT_SYM
)
427 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
428 ret
+= fprintf(fp
, "|\n");
429 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
430 ret
+= fprintf(fp
, "---");
435 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
438 ret
+= fprintf(fp
, " %s\n", chain
->ms
.sym
->name
);
440 ret
+= fprintf(fp
, " %p\n", (void *)(long)chain
->ip
);
442 if (++entries_printed
== callchain_param
.print_limit
)
446 ret
+= __callchain__fprintf_graph(fp
, self
, total_samples
, 1, 1, left_margin
);
/*
 * Flat-mode callchain printing: recurse to the root parent first so
 * the chain prints outermost-caller down, skipping PERF_CONTEXT_MAX
 * marker entries; each frame prints its symbol name or raw ip.
 * NOTE(review): chunk is missing signature tail, the parent NULL-check
 * and if/else skeletons -- kept byte-identical, only comments added.
 */
451 static size_t callchain__fprintf_flat(FILE *fp
, struct callchain_node
*self
,
454 struct callchain_list
*chain
;
/* print ancestors first (recursive walk up to the root) */
460 ret
+= callchain__fprintf_flat(fp
, self
->parent
, total_samples
);
463 list_for_each_entry(chain
, &self
->val
, list
) {
/* skip context-marker pseudo entries */
464 if (chain
->ip
>= PERF_CONTEXT_MAX
)
467 ret
+= fprintf(fp
, " %s\n", chain
->ms
.sym
->name
);
469 ret
+= fprintf(fp
, " %p\n",
470 (void *)(long)chain
->ip
);
/*
 * Print all sorted callchains attached to hist_entry @self, dispatching
 * on callchain_param.mode: flat mode prints a percentage then the flat
 * chain; ABS/REL graph modes delegate to callchain__fprintf_graph().
 * Honors print_limit.
 * NOTE(review): chunk is missing the while-loop skeleton, the flat
 * case label, break statements and default -- kept byte-identical,
 * only comments added.
 */
476 static size_t hist_entry_callchain__fprintf(FILE *fp
, struct hist_entry
*self
,
477 u64 total_samples
, int left_margin
)
479 struct rb_node
*rb_node
;
480 struct callchain_node
*chain
;
482 u32 entries_printed
= 0;
484 rb_node
= rb_first(&self
->sorted_chain
);
488 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
489 percent
= chain
->hit
* 100.0 / total_samples
;
490 switch (callchain_param
.mode
) {
/* flat mode: percentage header, then the chain itself */
492 ret
+= percent_color_fprintf(fp
, " %6.2f%%\n",
494 ret
+= callchain__fprintf_flat(fp
, chain
, total_samples
);
496 case CHAIN_GRAPH_ABS
: /* Falldown */
497 case CHAIN_GRAPH_REL
:
498 ret
+= callchain__fprintf_graph(fp
, chain
, total_samples
,
504 ret
+= fprintf(fp
, "\n");
505 if (++entries_printed
== callchain_param
.print_limit
)
507 rb_node
= rb_next(rb_node
);
/*
 * Format one hist_entry line into @s (at most @size bytes): overhead
 * percentage (colored when @color), optional cpu-utilization columns,
 * optional sample count, diff/displacement columns when comparing
 * against @pair_hists, then each active sort key's column.  Returns
 * the number of characters written (snprintf semantics).
 * NOTE(review): chunk is missing many original lines (if/else
 * skeletons, 'ret'/'bf' declarations, percentage-vs-count branch,
 * divisor tails, return) -- kept byte-identical, only comments added.
 */
513 int hist_entry__snprintf(struct hist_entry
*self
, char *s
, size_t size
,
514 struct hists
*pair_hists
, bool show_displacement
,
515 long displacement
, bool color
, u64 session_total
)
517 struct sort_entry
*se
;
518 u64 count
, total
, count_sys
, count_us
, count_guest_sys
, count_guest_us
;
519 const char *sep
= symbol_conf
.field_sep
;
522 if (symbol_conf
.exclude_other
&& !self
->parent
)
/* comparing against a baseline: take counts from the paired entry */
526 count
= self
->pair
? self
->pair
->count
: 0;
527 total
= pair_hists
->stats
.total
;
528 count_sys
= self
->pair
? self
->pair
->count_sys
: 0;
529 count_us
= self
->pair
? self
->pair
->count_us
: 0;
530 count_guest_sys
= self
->pair
? self
->pair
->count_guest_sys
: 0;
531 count_guest_us
= self
->pair
? self
->pair
->count_guest_us
: 0;
/* no pair: use this entry's own counts against the session total */
534 total
= session_total
;
535 count_sys
= self
->count_sys
;
536 count_us
= self
->count_us
;
537 count_guest_sys
= self
->count_guest_sys
;
538 count_guest_us
= self
->count_guest_us
;
/* colored percentage when @color, plain snprintf otherwise */
543 ret
= percent_color_snprintf(s
, size
,
544 sep
? "%.2f" : " %6.2f%%",
545 (count
* 100.0) / total
);
547 ret
= snprintf(s
, size
, sep
? "%.2f" : " %6.2f%%",
548 (count
* 100.0) / total
);
549 if (symbol_conf
.show_cpu_utilization
) {
550 ret
+= percent_color_snprintf(s
+ ret
, size
- ret
,
551 sep
? "%.2f" : " %6.2f%%",
552 (count_sys
* 100.0) / total
);
553 ret
+= percent_color_snprintf(s
+ ret
, size
- ret
,
554 sep
? "%.2f" : " %6.2f%%",
555 (count_us
* 100.0) / total
);
557 ret
+= percent_color_snprintf(s
+ ret
,
559 sep
? "%.2f" : " %6.2f%%",
560 (count_guest_sys
* 100.0) /
562 ret
+= percent_color_snprintf(s
+ ret
,
564 sep
? "%.2f" : " %6.2f%%",
565 (count_guest_us
* 100.0) /
/* raw count column (non-percent output mode) */
570 ret
= snprintf(s
, size
, sep
? "%lld" : "%12lld ", count
);
572 if (symbol_conf
.show_nr_samples
) {
574 ret
+= snprintf(s
+ ret
, size
- ret
, "%c%lld", *sep
, count
);
576 ret
+= snprintf(s
+ ret
, size
- ret
, "%11lld", count
);
/* diff column: delta between paired and own percentage */
581 double old_percent
= 0, new_percent
= 0, diff
;
584 old_percent
= (count
* 100.0) / total
;
585 if (session_total
> 0)
586 new_percent
= (self
->count
* 100.0) / session_total
;
588 diff
= new_percent
- old_percent
;
/* suppress noise below 0.01% */
590 if (fabs(diff
) >= 0.01)
591 snprintf(bf
, sizeof(bf
), "%+4.2F%%", diff
);
593 snprintf(bf
, sizeof(bf
), " ");
596 ret
+= snprintf(s
+ ret
, size
- ret
, "%c%s", *sep
, bf
);
598 ret
+= snprintf(s
+ ret
, size
- ret
, "%11.11s", bf
);
600 if (show_displacement
) {
602 snprintf(bf
, sizeof(bf
), "%+4ld", displacement
);
604 snprintf(bf
, sizeof(bf
), " ");
607 ret
+= snprintf(s
+ ret
, size
- ret
, "%c%s", *sep
, bf
);
609 ret
+= snprintf(s
+ ret
, size
- ret
, "%6.6s", bf
);
/* one column per active sort key */
613 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
617 ret
+= snprintf(s
+ ret
, size
- ret
, "%s", sep
?: " ");
618 ret
+= se
->se_snprintf(self
, s
+ ret
, size
- ret
,
619 se
->se_width
? *se
->se_width
: 0);
/*
 * Convenience wrapper: format @self into a local buffer via
 * hist_entry__snprintf() (color forced on) and print it to @fp with a
 * trailing newline.
 * NOTE(review): the signature tail (session_total parameter) and the
 * 'bf' buffer declaration are missing from this chunk -- kept
 * byte-identical, only comments added.
 */
625 int hist_entry__fprintf(struct hist_entry
*self
, struct hists
*pair_hists
,
626 bool show_displacement
, long displacement
, FILE *fp
,
630 hist_entry__snprintf(self
, bf
, sizeof(bf
), pair_hists
,
631 show_displacement
, displacement
,
632 true, session_total
);
633 return fprintf(fp
, "%s\n", bf
);
/*
 * Print @self's callchains, computing the left margin: when the first
 * sort dimension is the comm, indent to the comm column width minus
 * the comm length so the graph lines up under the entry.
 * NOTE(review): the signature tail, 'left_margin' init and the final
 * call's trailing argument are missing from this chunk -- kept
 * byte-identical, only comments added.
 */
636 static size_t hist_entry__fprintf_callchain(struct hist_entry
*self
, FILE *fp
,
641 if (sort__first_dimension
== SORT_COMM
) {
642 struct sort_entry
*se
= list_first_entry(&hist_entry__sort_list
,
644 left_margin
= se
->se_width
? *se
->se_width
: 0;
645 left_margin
-= thread__comm_len(self
->thread
);
648 return hist_entry_callchain__fprintf(fp
, self
, session_total
,
/*
 * Print the whole histogram @self to @fp: the column header block
 * (Overhead/Baseline, optional Samples, cpu-utilization, Delta and
 * Displacement columns, one header per sort key, then the dotted
 * underline row), followed by one line per entry in sorted order,
 * with optional callchains and verbose map dumps.
 * NOTE(review): this chunk is missing many original lines (if/else
 * skeletons, 'ret'/'width'/'nd' declarations, column-width loop tail,
 * displacement computation tail, return) -- kept byte-identical,
 * only comments added.
 */
652 size_t hists__fprintf(struct hists
*self
, struct hists
*pair
,
653 bool show_displacement
, FILE *fp
)
655 struct sort_entry
*se
;
658 unsigned long position
= 1;
659 long displacement
= 0;
661 const char *sep
= symbol_conf
.field_sep
;
662 char *col_width
= symbol_conf
.col_width_list_str
;
/* --- header row --- */
666 fprintf(fp
, "# %s", pair
? "Baseline" : "Overhead");
668 if (symbol_conf
.show_nr_samples
) {
670 fprintf(fp
, "%cSamples", *sep
);
672 fputs(" Samples ", fp
);
675 if (symbol_conf
.show_cpu_utilization
) {
677 ret
+= fprintf(fp
, "%csys", *sep
);
678 ret
+= fprintf(fp
, "%cus", *sep
);
680 ret
+= fprintf(fp
, "%cguest sys", *sep
);
681 ret
+= fprintf(fp
, "%cguest us", *sep
);
684 ret
+= fprintf(fp
, " sys ");
685 ret
+= fprintf(fp
, " us ");
687 ret
+= fprintf(fp
, " guest sys ");
688 ret
+= fprintf(fp
, " guest us ");
695 ret
+= fprintf(fp
, "%cDelta", *sep
);
697 ret
+= fprintf(fp
, " Delta ");
699 if (show_displacement
) {
701 ret
+= fprintf(fp
, "%cDisplacement", *sep
);
703 ret
+= fprintf(fp
, " Displ");
/* one header (and width computation) per active sort key */
707 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
711 fprintf(fp
, "%c%s", *sep
, se
->se_header
);
714 width
= strlen(se
->se_header
);
716 if (symbol_conf
.col_width_list_str
) {
/* user-supplied comma-separated column widths */
718 *se
->se_width
= atoi(col_width
);
719 col_width
= strchr(col_width
, ',');
724 width
= *se
->se_width
= max(*se
->se_width
, width
);
726 fprintf(fp
, " %*s", width
, se
->se_header
);
/* --- dotted underline row --- */
733 fprintf(fp
, "# ........");
734 if (symbol_conf
.show_nr_samples
)
735 fprintf(fp
, " ..........");
737 fprintf(fp
, " ..........");
738 if (show_displacement
)
739 fprintf(fp
, " .....");
741 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
749 width
= *se
->se_width
;
751 width
= strlen(se
->se_header
);
752 for (i
= 0; i
< width
; i
++)
756 fprintf(fp
, "\n#\n");
/* --- one line per entry, in sorted order --- */
759 for (nd
= rb_first(&self
->entries
); nd
; nd
= rb_next(nd
)) {
760 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
762 if (show_displacement
) {
764 displacement
= ((long)h
->pair
->position
-
770 ret
+= hist_entry__fprintf(h
, pair
, show_displacement
,
771 displacement
, fp
, self
->stats
.total
);
773 if (symbol_conf
.use_callchain
)
774 ret
+= hist_entry__fprintf_callchain(h
, fp
, self
->stats
.total
);
/* verbose: dump the thread's maps for unresolved entries */
776 if (h
->ms
.map
== NULL
&& verbose
> 1) {
777 __map_groups__fprintf_maps(&h
->thread
->mg
,
778 MAP__FUNCTION
, verbose
, fp
);
779 fprintf(fp
, "%.10s end\n", graph_dotted_line
);
783 free(rem_sq_bracket
);