#include "ui/progress.h"
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
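
/*
 * A worked example of the decay above: an entry whose period is 800
 * drops to 700, then 612, then 535 on successive passes, losing 1/8
 * of its weight each time, so an entry that stops receiving samples
 * fades out geometrically instead of vanishing at once.
 */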
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
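
/*
 * A minimal sketch of how a live tool would use the decay machinery
 * between display refreshes; the refresh() wrapper is hypothetical,
 * only the two calls it makes are real API:
 *
 *	static void refresh(struct hists *hists, bool zap_user,
 *			    bool zap_kernel)
 *	{
 *		hists__decay_entries(hists, zap_user, zap_kernel);
 *		hists__output_resort(hists, NULL);
 *	}
 *
 * Entries whose period decays to zero are deleted outright, so
 * symbols that stop sampling eventually drop off the display.
 */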
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
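
/*
 * To illustrate the period=weight trick above: two mem samples with
 * weights 30 and 70 hitting the same entry accumulate period = 100
 * and nr_events = 2, so a plain sort on period effectively sorts on
 * sum(weight) == nr_events * mean(weight), which is what we want.
 */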
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, bi->flags.cycles ? bi->flags.cycles : 1,
				0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return 0;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
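
/*
 * A rough sketch of how a report-style caller would pick one of the
 * ops tables above; the conditions are illustrative, not copied from
 * any particular tool:
 *
 *	iter.ops = &hist_iter_normal;
 *	if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *	else if (sample->branch_stack)
 *		iter.ops = &hist_iter_branch;
 */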
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
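
/*
 * Sketch of a sample-processing callback driving the iterator above.
 * The process_sample() wrapper is hypothetical; the initializer and
 * the hist_entry_iter__add() call match the contract implemented
 * here:
 *
 *	static int process_sample(struct perf_evsel *evsel,
 *				  struct perf_sample *sample,
 *				  struct addr_location *al)
 *	{
 *		struct hist_entry_iter iter = {
 *			.evsel	= evsel,
 *			.sample	= sample,
 *			.ops	= &hist_iter_normal,
 *		};
 *
 *		return hist_entry_iter__add(&iter, al,
 *					    PERF_MAX_STACK_DEPTH, NULL);
 *	}
 */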
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
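
/*
 * The rotation above is a simple double buffer: the collapse pass
 * drains the tree returned here while new samples keep landing in the
 * other element of entries_in_array, so writers only contend on
 * hists->lock for the duration of the pointer swap.
 */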
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}
void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain = evsel ? (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) : symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
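
/*
 * Sketch of how a diff-style consumer combines the two helpers above
 * (the pair_hists() wrapper is hypothetical): first attach each
 * leader entry to its counterpart, then backfill dummy entries for
 * buckets only the other session saw.
 *
 *	static int pair_hists(struct hists *leader, struct hists *other)
 *	{
 *		hists__match(leader, other);
 *		return hists__link(leader, other);
 *	}
 */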
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 * due to no extra IPC information.
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
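
/*
 * The config hook above makes the percentage mode persistent, e.g. in
 * ~/.perfconfig:
 *
 *	[hist]
 *		percentage = relative
 *
 * which ends up in parse_filter_percentage() exactly as if "relative"
 * had been passed on the command line.
 */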
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}