static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};
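/*
 * Per-column width bookkeeping: hists->col_len[] records the widest
 * string seen so far for each output column, so the stdio/TUI browsers
 * can line up their columns without a second pass over the entries.
 */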
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
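/*
 * Update the column widths for one entry.  For example, a resolved
 * symbol named "main" (namelen == 4) needs 4 + 4 = 8 columns for
 * "[.] main", and with verbose output on a 64-bit machine another
 * 16 + 2 + 3 columns for the "0x..." address and symtab origin.
 */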
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
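/*
 * Attribute a sample's period to the right privilege bucket (kernel,
 * user, guest kernel, guest user) based on the cpumode bits of the
 * event header, so the browsers can show the sys/us split.
 */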
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
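/*
 * Exponential decay at 7/8 per pass: an entry with period 1024 shrinks
 * to 896, 784, 686, ... and eventually to 0 if it stops getting new
 * samples, at which point hists__decay_entries() can prune it.
 */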
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually
		 * free it when the user stops browsing and it again
		 * gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same
		 * order as hist_entry__collapse() so that we can use
		 * an appropriate function when searching an entry
		 * regardless of which sort keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	free_srcline(he->srcline);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
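/*
 * hists->entries_in_array[] holds two rb_roots used as a double buffer:
 * new entries keep accumulating in one tree while the other is drained
 * by hists__collapse_resort(), so e.g. a live top session doesn't have
 * to block sample insertion while it resorts.
 */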
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
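/*
 * At this point an entry has moved through two of the three trees:
 * entries_in (keyed by the sort criteria at insertion time) and
 * entries_collapsed (merged by the collapse criteria).  The code below
 * builds the third, hists->entries, sorted by period for display.
 */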
/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}
*a
,
588 struct hist_entry
*b
)
592 struct perf_evsel
*evsel
;
593 struct hist_entry
*pair
;
594 u64
*periods_a
, *periods_b
;
596 ret
= period_cmp(a
->stat
.period
, b
->stat
.period
);
597 if (ret
|| !symbol_conf
.event_group
)
600 evsel
= hists_to_evsel(a
->hists
);
601 nr_members
= evsel
->nr_members
;
605 periods_a
= zalloc(sizeof(periods_a
) * nr_members
);
606 periods_b
= zalloc(sizeof(periods_b
) * nr_members
);
608 if (!periods_a
|| !periods_b
)
611 list_for_each_entry(pair
, &a
->pairs
.head
, pairs
.node
) {
612 evsel
= hists_to_evsel(pair
->hists
);
613 periods_a
[perf_evsel__group_idx(evsel
)] = pair
->stat
.period
;
616 list_for_each_entry(pair
, &b
->pairs
.head
, pairs
.node
) {
617 evsel
= hists_to_evsel(pair
->hists
);
618 periods_b
[perf_evsel__group_idx(evsel
)] = pair
->stat
.period
;
621 for (i
= 1; i
< nr_members
; i
++) {
622 ret
= period_cmp(periods_a
[i
], periods_b
[i
]);
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
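/*
 * min_callchain_hits implements the callchain min_percent cutoff: e.g.
 * with a total_period of 200000 and min_percent == 0.5, only chains
 * that accumulated at least 1000 hits survive callchain_param.sort().
 */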
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
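/*
 * Filtering: each filter kind (dso, thread, symbol, parent) owns one
 * bit in he->filtered, and an entry is visible only while the whole
 * mask is zero.  So after clearing one bit below, the entry is added
 * back into the totals only if no other filter bit remains set.
 */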
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
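/*
 * nr_events[0] accumulates the grand total: every event bumps both the
 * per-type slot and slot 0, since no record type uses the value 0.
 */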
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
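/*
 * The pairing below links the same bucket across two struct hists, as
 * used by, e.g., 'perf diff' and event group viewing to show one row
 * with columns drawn from several sessions/events.
 */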
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, just add a dummy entry on the leader hists,
 * with period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}