static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE,
        .key         = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

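/*
 * Note (added, not in the original source): BITS_PER_LONG / 4 is the number
 * of hex digits needed to print a raw pointer, e.g. 64 / 4 = 16 columns on a
 * 64-bit build, since an unresolved entry is shown as a bare 0x-prefixed
 * address instead of a symbol name.
 */
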
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

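/*
 * Worked example (added, not from the original source): each decay pass
 * scales the period by 7/8, so after n passes a period p becomes
 * p * (7/8)^n; about five passes halve it, since (7/8)^5 ~= 0.513.
 */
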
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when
                 * the user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        --hists->nr_entries;
                        if (!n->filtered)
                                --hists->nr_non_filtered_entries;

                        hist_entry__free(n);
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) the buffer
                         * allocated by sample__resolve_bstack() and will be
                         * freed after adding new entries, so we need to save
                         * a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self)
                                he_stat__add_period(&he->stat, period, weight);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = iter->sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort() which is solely
         * based on periods. We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and in the he_stat__add_period()
         * function.
         */
        he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
                                cost, cost, 0, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

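/*
 * Example (added for illustration): a mem sample with weight 40 is added
 * with period = 40 above, so after hists__collapse_resort() an entry's
 * total period equals the sum of its samples' weights, i.e. effectively
 * nr_events * average weight.
 */
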
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(&evsel->hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since
         * the mem info was either already freed in add_hist_entry() or
         * passed to a new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;

        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
                                1, 1, 0, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(&evsel->hists, he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursions so that they're
         * accumulated only once, preventing entries from exceeding
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

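/*
 * Note (added): the cache is sized PERF_MAX_STACK_DEPTH + 1 because one slot
 * is taken by iter_add_single_cumulative_entry() for the sampled location
 * itself, plus up to PERF_MAX_STACK_DEPTH callchain entries added by
 * iter_add_next_cumulative_entry().
 */
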
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        callchain_append(he->callchain, &callchain_cursor, sample->period);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(&evsel->hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

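/*
 * Usage sketch (added; a hypothetical caller, not part of this file):
 * a sample-processing function picks an ops table and drives the
 * iterator through hist_entry_iter__add() below:
 *
 *	struct hist_entry_iter iter = {
 *		.ops		 = &hist_iter_normal,
 *		.hide_unresolved = false,
 *	};
 *
 *	if (sample->branch_stack)
 *		iter.ops = &hist_iter_branch;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   PERF_MAX_STACK_DEPTH, NULL);
 */
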
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         struct perf_evsel *evsel, struct perf_sample *sample,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
                                        max_stack_depth);
        if (err)
                return err;

        iter->evsel = evsel;
        iter->sample = sample;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->collapse(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        zfree(&he->branch_info);
        zfree(&he->mem_info);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

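/*
 * Note (added): entries_in_array[] holds two input trees used as a double
 * buffer. The rotation above lets new entries keep flowing into one tree
 * while hists__collapse_resort() drains the other, serialized by
 * hists->lock.
 */
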
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

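/*
 * Note (added): the resort pipeline is two-stage. hists__collapse_resort()
 * above merges entries that compare equal on the collapse keys into
 * entries_collapsed; hists__output_resort() further below then re-sorts the
 * result into hists->entries, the tree the UIs actually walk.
 */
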
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->sort(a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);
        }
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->ms.unfolded = false;
        h->row_offset = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

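/*
 * Note (added): PERF_RECORD_* types start at 1, so nr_events[0] is free and
 * doubles as a running total across all event types.
 */
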
void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

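/*
 * Note (added): hists__match()/hists__link() support comparing two sets of
 * hists (e.g. perf diff): match links equal buckets as pairs, while link
 * inserts zeroed dummy entries into the leader for buckets only the other
 * hists has, so both sides can be walked in lockstep.
 */
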
u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}
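
/*
 * Example (added): the "hist.percentage" key above corresponds to a perf
 * config file entry, e.g. in ~/.perfconfig:
 *
 *	[hist]
 *		percentage = relative
 */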