#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
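
/*
 * Note: unlike hists__set_col_len(), the function above only ever grows
 * a column, so repeated calls across many entries converge on the widest
 * value seen so far.
 */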
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
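
/*
 * The column widths accumulated above are what the stdio and TUI browsers
 * later use to align their output; a new entry can only widen a column,
 * never shrink it (see hists__new_col_len()).
 */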
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
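
/*
 * Decaying by 7/8 on every pass gives an exponential moving average of the
 * period; this is how 'perf top' ages out entries that stop getting samples.
 * Once the period decays to zero the entry is removed, see
 * hists__decay_entry() below.
 */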
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (sort__need_collapse)
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);

		if (!symbol_conf.report_hierarchy)
			he->leaf = true;
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
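
/*
 * In short: hists__findnew_entry() walks the rbtree ordered by the
 * configured sort keys; on a match it just accumulates periods into the
 * existing entry, otherwise it allocates a new entry with hist_entry__new()
 * and links it in place.
 */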
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry 		= iter_prepare_cumulative_entry,
	.add_single_entry 	= iter_add_single_cumulative_entry,
	.next_entry 		= iter_next_cumulative_entry,
	.add_next_entry 	= iter_add_next_cumulative_entry,
	.finish_entry 		= iter_finish_cumulative_entry,
};
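
/*
 * Each ops table above implements one ingestion strategy: mem and branch
 * samples fan out into multiple entries per sample, "normal" adds exactly
 * one, and "cumulative" additionally credits every caller on the callchain
 * (the --children behavior).
 */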
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
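
/*
 * Typical call site (a sketch, not code from this file): a tool such as
 * 'perf report' picks an ops table based on the sample type and then feeds
 * every sample through the iterator:
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, rep);
 */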
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is listed.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists__apply_filters(hists, new);
	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->parent_he = parent;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
				 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
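
/*
 * entries_in_array[] is a two-element double buffer: while the resort path
 * drains one tree, new samples keep being inserted into the other, which is
 * what lets 'perf top' collect and display concurrently.
 */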
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!sort__need_collapse)
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
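
/*
 * The resort pipeline is thus: hists__collapse_resort() merges entries that
 * compare equal under the collapse keys, then output_resort() (below)
 * re-sorts the collapsed tree into hists->entries for display.
 */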
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			hists->nr_entries++;
			if (!he->filtered) {
				hists->nr_non_filtered_entries++;
				hists__calc_col_len(hists, he);
			}

			continue;
		}

		/* only update stat for leaf entries to avoid duplication */
		hists__inc_stats(hists, he);
		if (!he->filtered)
			hists__calc_col_len(hists, he);

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);  /* update column width */
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		return hists__hierarchy_output_resort(hists, prog,
						      &hists->entries_collapsed,
						      &hists->entries,
						      min_callchain_hits,
						      use_callchain);
	}

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	/*
	 * resort output after applying a new filter since a filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
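
/*
 * nr_events[0] doubles as a running total: the PERF_RECORD_* record types
 * start at 1, so slot 0 is never used by a real record type.
 */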
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}