perf test: Consider PERF_SAMPLE_TRANSACTION in the "sample parsing" test
tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
	.mode = CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order = ORDER_CALLEE,
	.key = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

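/*
 * Grow the cached column widths so that every field of @h (symbol,
 * comm, dso, parent, branch and mem info, transaction) fits.
 */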
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
				 + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

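/* Credit @period to the sys/user/guest sub-period selected by @cpumode. */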
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

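/*
 * Exponentially decay the entry, keeping 7/8 of its period and event
 * count; used by live displays such as perf top.
 */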
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

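/*
 * Decay every entry; remove those whose period decays to zero (or that
 * match the zap_user/zap_kernel filters), unless they are still marked
 * as used, e.g. by an open annotation browser.
 */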
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

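/*
 * Allocate a hist_entry initialized from @template.  The callchain
 * root, when callchains are enabled, lives in the flexible space
 * allocated past the end of the struct.
 */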
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

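/*
 * Insert @entry into the current input tree or, when an entry that
 * compares equal already exists there, aggregate the new period and
 * weight into it instead of allocating a duplicate.
 */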
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.stat = {
			.period = period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = bi->to.map,
			.sym = bi->to.sym,
		},
		.cpu = al->cpu,
		.ip = bi->to.addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists = self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.branch_info = NULL,
		.mem_info = NULL,
		.transaction = transaction,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

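/*
 * Rotate the double-buffered input trees under hists->lock: the tree
 * returned here is drained by the collapse code while new samples keep
 * going into the other element of entries_in_array.
 */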
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

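/*
 * Merge the entries of the rotated input tree into entries_collapsed,
 * combining the ones that are equal under the collapse sort keys.
 */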
void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

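/*
 * Link @he into the output tree, ordered by descending period; when
 * callchains are in use, sort its chain first using the
 * @min_callchain_hits threshold.
 */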
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

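/*
 * Clear @filter's bit in @h->filtered; if no other filter still hides
 * the entry, account its period and events back into the hists totals.
 */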
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

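/* nr_events[0] accumulates the grand total across all PERF_RECORD_* types. */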
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

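/*
 * Add a copy of @pair with zeroed stats, used by hists__link() as a
 * placeholder for entries the leader hists doesn't have.
 */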
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, add a dummy entry on the leader hists with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}