perf session: There is no need for a per session hists instance
tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
        .mode = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else
                hists__set_unres_dso_col_len(hists, HISTC_DSO);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->branch_info) {
                int symlen;
                /*
                 * +4 accounts for '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }
}
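
/*
 * Note: hists__calc_col_len() only ever grows a column, via
 * hists__new_col_len(), so widths are monotonic until
 * hists__reset_col_len() zeroes them again. Any re-walk of the entries
 * must therefore reset first, as hists__output_recalc_col_len() below
 * does.
 */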

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}
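
/*
 * The cpumode values above are the PERF_RECORD_MISC_* bits the kernel
 * sets in the event header, so each sample's period is bucketed into
 * sys/us/guest_sys/guest_us according to where the sample was taken.
 * Unknown cpumodes are silently ignored here.
 */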

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
        he_stat->period += period;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period += src->period;
        dest->period_sys += src->period_sys;
        dest->period_us += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us += src->period_guest_us;
        dest->nr_events += src->nr_events;
}

static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}
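
/*
 * Decay keeps 7/8 of the period on each pass, i.e. an entry that gets
 * no new samples loses ~12.5% per refresh until the integer division
 * drives it to zero and it becomes eligible for removal. total_period
 * is adjusted by the amount actually decayed so percentages stay
 * consistent with the surviving entries.
 */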

static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when the
                 * user stops browsing and it gets fully decayed again.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}
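
/*
 * The callchain root lives at the tail of the hist_entry allocation
 * (sizeof(*he) + callchain_size above), so its storage is only paid
 * for when symbol_conf.use_callchain is set; he->callchain is then
 * valid to pass to callchain_init()/callchain_merge().
 */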

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);
                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it. Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}
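
/*
 * add_hist_entry() is the single insertion point for both
 * __hists__add_entry() and __hists__add_branch_entry() below. It takes
 * hists->lock because the 'entries_in' tree can be rotated out from
 * under it by hists__get_rotate_entries_in() when another thread
 * resorts concurrently (the threaded collapse/resort case).
 */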

struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = bi->to.map,
                        .sym = bi->to.sym,
                },
                .cpu = al->cpu,
                .ip = bi->to.addr,
                .level = al->level,
                .stat = {
                        .period = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu = al->cpu,
                .ip = al->addr,
                .level = al->level,
                .stat = {
                        .period = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}
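
/*
 * Returning true means the entry was linked into the collapsed tree;
 * returning false means it matched an existing entry, had its stats
 * and callchain merged into that entry, and was freed. Callers must
 * not touch 'he' after a false return.
 */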

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
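
/*
 * entries_in_array acts as a double buffer: the sampling side keeps
 * inserting into one tree while the resort side drains the other. The
 * swap above happens under hists->lock, the same lock taken by
 * add_hist_entry(), so a rotation never races with an insertion.
 */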

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->stat.period > iter->stat.period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}
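
/*
 * Note the inverted comparison: bigger periods go to the left, so an
 * in-order walk of hists->entries yields entries sorted by descending
 * period, which is what the report/top output wants.
 */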

static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}
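
/*
 * A minimal consumer sketch (hypothetical, not part of this file):
 * after all samples have been added via __hists__add_entry(), run the
 * two passes in order and walk the output tree (printf of a u64 also
 * needs <inttypes.h> for PRIu64):
 *
 *        hists__collapse_resort(hists);
 *        hists__output_resort(hists);
 *
 *        struct rb_node *nd;
 *        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
 *                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 *                printf("%" PRIu64 " %s\n", h->stat.period,
 *                       h->ms.sym ? h->ms.sym->name : "[unknown]");
 *        }
 */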

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}
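
/*
 * hists__remove_entry_filter() clears one filter bit; only when the
 * entry is left with no filter bits at all is it added back into the
 * nr_entries/total_period accounting. The hists__filter_by_*()
 * functions below all follow the same pattern: reset the totals, then
 * re-test every entry against the (possibly changed) filter.
 */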

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}
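
/*
 * hists__match() pairs entries that exist in both trees (as used by,
 * e.g., perf diff to line up a baseline against another run); entries
 * with no counterpart in the leader are handled by hists__link()
 * below.
 */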

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, just add a dummy entry on the leader hists
 * with period = 0 and nr_events = 0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
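
/*
 * The dummy entries added by hists__link() carry a zeroed he->stat
 * (see the memset in hists__add_dummy_entry()), so they count toward
 * nr_entries but add nothing to total_period; they exist only so that
 * every entry in 'other' has a pair anchor in the leader.
 */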