static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param	callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE
};
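/*
 * Column-width bookkeeping: hists->col_len[] tracks the widest string seen
 * so far for each output column, so the stdio/TUI browsers can align rows.
 */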
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
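/*
 * Grow the column widths to fit one entry: symbol name, comm, thread, DSO
 * and, when branch stacks were recorded, the from/to symbol and DSO columns.
 */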
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info,
		 * +2 accounts for the 0x prefix on raw addresses.
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
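/*
 * Bucket the sample period by the cpumode bits from the perf event header,
 * so reports can split an entry's time between user, kernel and guest.
 */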
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period		+= period;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
}
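/*
 * Exponential decay, used by live-mode tools such as perf top: each pass
 * keeps 7/8 of an entry's period, so entries that stop getting samples
 * fade out and are eventually removed.
 */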
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
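/*
 * Account a new, unfiltered entry in the hists totals and widen the output
 * columns to fit it.
 */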
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
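/*
 * Insert into the hists->entries_in tree, keyed by the configured sort
 * order; if an entry with equal sort keys already exists, just add the
 * period to it instead of creating a duplicate.
 */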
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
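/*
 * Double buffering: entries_in_array[] holds two input trees, so the
 * collapse pass can drain one while other threads keep inserting into the
 * second; the swap happens under hists->lock.
 */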
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
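/*
 * Drain the current input tree and merge its entries into
 * hists->entries_collapsed, combining entries that compare equal under
 * the collapse criteria.
 */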
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}
void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}
/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->stat.period > iter->stat.period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}
void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}
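/*
 * Clear one filter bit on an entry; once no filter bits remain, add the
 * entry back into the visible totals (nr_entries, total_period, nr_events)
 * and recalculate the column widths.
 */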
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
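/*
 * For hists__link(): add a zero-stat copy of 'pair' to 'hists' so that
 * every entry in the other hists has a counterpart in the leader.
 */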
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}