perf hists: Export a couple of hist functions
tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
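
/*
 * Grow the output column widths so that all the fields of @h fit:
 * symbol and DSO names, comm/thread, parent, branch and memory info,
 * srcline/srcfile, etc.  Widths only ever increase here.
 */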
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
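
/*
 * Age an entry for the periodic top-style refresh: scale its period and
 * event count down by 7/8 (also decaying the cumulated stats and the
 * callchain), fix up the hists totals, and return true once the period
 * has decayed to zero so the caller can prune the entry.
 */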
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
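
/*
 * Allocate a new hist_entry based on @template.  The callchain root,
 * when used, lives in the flexible area at the end of the struct, and
 * resources shared with the template (map, branch_info, mem_info,
 * raw_data, thread) are copied or get an extra reference so the new
 * entry owns them.
 */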
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (part of) an allocation made by
			 * sample__resolve_bstack() and will be freed after
			 * new entries are added.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
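
/*
 * Look up @entry in the hists' input tree and aggregate the sample
 * period into the matching node, or allocate and insert a new entry
 * when nothing compares equal under the current sort keys.
 */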
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
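
/*
 * The iterator operations below come in four flavors (mem, branch,
 * normal and cumulative) and are driven by hist_entry_iter__add():
 * prepare_entry first, then add_single_entry, then next_entry and
 * add_next_entry in a loop, and finally finish_entry.
 */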
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is done solely
	 * on periods.  We want the sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and via he_stat__add_period().
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling the callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
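
/*
 * The cumulative flavor adds the sample period to every entry in the
 * callchain, not just the leaf, keeping a per-sample cache (he_cache)
 * of the entries already accounted so that cycles and recursion are
 * not counted twice.
 */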
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, to prevent entries from amounting to
	 * more than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
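
/*
 * Resolve the callchain for a sample and drive the iterator callbacks:
 * prepare_entry, add_single_entry, then next_entry/add_next_entry until
 * the former returns 0, invoking add_entry_cb for each new entry.
 * finish_entry runs even when an earlier step failed, and its error is
 * only reported when no earlier one occurred.
 */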
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}

/*
 * collapse the histogram
 */
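
/*
 * Insert @he into @root, merging it into an existing node (and freeing
 * @he) when both compare equal under the collapse criteria.  Returns
 * true when a new node was actually inserted.
 */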
bool hists__collapse_insert_entry(struct hists *hists,
				  struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
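
/*
 * Flip hists->entries_in to the other tree of entries_in_array, so new
 * entries can keep being inserted while the returned tree is being
 * collapsed.
 */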
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
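
/*
 * Link @he into the output tree, ordered by the output sort keys
 * (hist_entry__sort), sorting its callchain first when callchains are
 * in use.
 */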
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
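
/*
 * Insert a zeroed copy of @pair into @hists, marked as ->dummy, so that
 * hists__link() below has something to pair an entry with when it only
 * exists in the other hists.
 */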
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
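
/*
 * Feed the cycle counts of a sample's branch stack to the annotation
 * code, attributing them to the 'from' addresses of the branches,
 * walking the stack in program order.
 */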
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
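
/*
 * Initialize an already allocated hists: empty rbtrees, the input tree
 * double buffer, the lock protecting it, and a disabled socket filter.
 */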
int __hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't set up hists class\n", stderr);

	return err;
}