/* tools/perf/util/hist.c */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

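/*
 * Bit positions for hist_entry->filtered: each active filter type sets its
 * own bit, and an entry is shown only when no bits remain set.
 */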
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

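/*
 * Per-column display width tracking: widths only ever grow as wider values
 * are seen, so columns end up sized for the widest entry displayed.
 */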
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

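/*
 * An unresolved address is printed as BITS_PER_LONG / 4 hex digits, so
 * widen the DSO column to at least that, unless the user pinned column
 * widths or set a field separator or DSO list.
 */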
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

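/*
 * Recompute column widths from scratch, considering only the first
 * max_rows unfiltered entries that will actually be displayed.
 */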
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

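/*
 * Attribute the sample period to the kernel/user and host/guest buckets
 * according to the cpumode bits of the sample's event header.
 */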
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}

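/*
 * Aging for the live modes (e.g. perf top): each decay pass keeps 7/8 of
 * the period and event count, so entries that stop receiving samples fade
 * out and are eventually removed by __hists__decay_entries().
 */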
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

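/*
 * When callchains are enabled, the callchain root lives in storage
 * appended to the hist_entry itself, hence the extra callchain_size bytes
 * in the allocation below.
 */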
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

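/*
 * Insert-or-accumulate under hists->lock: if an entry comparing equal on
 * all sort keys already exists in the current input tree, add the period
 * to it; otherwise allocate a new entry and link it in.
 */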
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he_stat__add_period(&he->stat, period);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

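/*
 * Public entry points: build a template hist_entry on the stack from the
 * resolved addr_location (plus branch info for branch stack samples) and
 * let add_hist_entry() do the insertion or accumulation.
 */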
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = bi->to.map,
			.sym = bi->to.sym,
		},
		.cpu = al->cpu,
		.ip = bi->to.addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists = self,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
	};

	return add_hist_entry(self, &entry, al, period);
}

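/*
 * Compare two entries over the configured sort keys, stopping at the first
 * key that differs. hist_entry__collapse() below prefers a key's dedicated
 * se_collapse method when one is provided.
 */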
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}

/*
 * collapse the histogram
 */

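/*
 * Try to insert @he into @root; if an equal entry already exists there,
 * fold @he's stats (and callchain, when enabled) into it and free @he.
 * Returns true only if @he was actually linked into the tree.
 */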
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

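/*
 * The input trees are double-buffered: return the current one and flip
 * entries_in to the other element of entries_in_array, so new samples can
 * keep arriving while the returned tree is being collapsed.
 */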
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

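/*
 * Merge the rotated input tree into entries_collapsed, re-applying any
 * active filters to the entries that end up newly linked in.
 */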
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

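/*
 * Output-tree insertion: entries are ordered by descending period, and the
 * callchain is sorted and pruned against min_callchain_hits on the way in.
 */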
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->stat.period > iter->stat.period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

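/*
 * Rebuild hists->entries from the collapsed (or input) tree, resetting the
 * totals and column widths and re-accounting every unfiltered entry.
 */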
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

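/*
 * Clear one filter bit; if the entry thereby becomes completely
 * unfiltered, add its counts back into the hists totals and widths.
 */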
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

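/*
 * Each filter kind comes in two parts: a predicate that sets the filter
 * bit on a single entry, and a walker that re-evaluates all entries after
 * the filter criterion (DSO, thread, or symbol substring) changes.
 */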
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

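/*
 * Add a zero-stat placeholder matching @pair's sort keys; used by
 * hists__link() below so that every entry in the other hists has a
 * counterpart in the leader.
 */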
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_node **p = &hists->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(pair, he);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node, parent, p);
		rb_insert_color(&he->rb_node, &hists->entries);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n = hists->entries.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
		int64_t cmp = hist_entry__cmp(he, iter);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist__entry_add_pair(pos, pair);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist__entry_add_pair(pair, pos);
		}
	}

	return 0;
}