perf session: Remove last reference to hists struct
tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

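/*
 * Column width bookkeeping: each hists instance remembers the widest
 * string seen so far for every output column so that the stdio/TUI
 * browsers can align their output.
 */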
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

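/*
 * Attribute the sample period to the privilege level (host/guest,
 * kernel/user) indicated by the event header's cpumode bits.
 */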
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

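/*
 * Exponentially decay an entry's period (used by the live modes, e.g.
 * perf top, to age out stale entries).  Returns true when the period
 * has decayed to zero, i.e. the entry can be removed if nobody is
 * still using it.
 */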
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually
		 * free it when the user stops browsing and it again
		 * gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

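/*
 * Allocate a hist_entry from a template, deep-copying the state the
 * template only borrows (branch_info) and, for --children mode, an
 * extra he_stat block used to accumulate the periods of child entries.
 */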
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch_info was (partly) allocated by
			 * sample__resolve_bstack() and will be freed
			 * after the new entries are added, so we need
			 * to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

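/*
 * Insert a sample into the input rbtree (hists->entries_in), merging it
 * into an existing entry when all sort keys compare equal, otherwise
 * allocating a fresh hist_entry from the template.
 */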
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same
		 * order as hist_entry__collapse() so that we can use
		 * an appropriate function when searching an entry
		 * regardless of which sort keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is achieved indirectly by
	 * passing period=weight here and letting
	 * he_stat__add_period() accumulate it.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

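/*
 * Per-sample-type iterator callbacks: mem and normal samples produce a
 * single entry, branch samples produce one entry per branch stack slot,
 * and cumulative mode (--children) produces one extra entry per
 * callchain node.
 */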
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

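/*
 * Drive one sample through the chosen ops table.  A simplified sketch
 * of how a caller such as perf report wires this up (details vary by
 * tool and version):
 *
 *	struct hist_entry_iter iter = { .ops = &hist_iter_normal };
 *
 *	if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   PERF_MAX_STACK_DEPTH, NULL);
 */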
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

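/*
 * Merge 'he' into the collapsed tree: entries that compare equal under
 * the collapse keys have their stats combined and their callchains
 * merged, and the duplicate is freed.  Returns true if 'he' was
 * actually inserted as a new node.
 */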
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

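/*
 * The two input trees (entries_in_array[0] and [1]) are double
 * buffered: new samples can keep landing in one while the other is
 * being collapsed.  Rotate to the other tree under the lock and hand
 * the full one back to the caller.
 */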
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

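/*
 * Insert into the output rbtree in descending order of the output sort
 * keys, sorting the entry's callchain on the way in.
 */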
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

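/*
 * Clear one filter bit on an entry; if that leaves the entry completely
 * unfiltered, fold it back into the non-filtered stats and column
 * widths.
 */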
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

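/*
 * Add a dummy entry mirroring 'pair' with zeroed stats, so that tools
 * comparing two hists (e.g. perf diff) have a row in this hists even
 * though no samples hit it here.
 */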
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find any, add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}