perf hists: Support filtering in hierarchy mode
tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
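
/*
 * The 7/8 factor above makes decay multiplicative, so entries that stop
 * receiving samples fade out geometrically: after N decay passes an old
 * period is scaled by (7/8)^N, i.e. roughly 26% remains after 10 passes
 * and about 4% after 25.  Periodic callers (e.g. the 'perf top' display
 * refresh) rely on this to age out stale entries, which get deleted once
 * their period reaches zero (see hists__decay_entries() below).
 */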

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);

		if (!symbol_conf.report_hierarchy)
			he->leaf = true;
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
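
/*
 * Overview (roughly): samples first land in the per-hists 'entries_in'
 * tree via hists__findnew_entry() above, hists__collapse_resort() then
 * merges entries that compare equal under the collapse keys, and
 * output_resort() finally builds the display-sorted 'entries' tree.  In
 * hierarchy mode the collapse stage instead builds a tree of entries,
 * one level per sort key.
 */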

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
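
/*
 * Rough sketch of how a caller drives these ops (based on builtin-report;
 * the surrounding variables here are illustrative):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel		= evsel,
 *		.sample		= sample,
 *		.add_entry_cb	= hist_iter__report_callback,
 *	};
 *
 *	if (sort__mode == SORT_MODE__BRANCH)
 *		iter.ops = &hist_iter_branch;
 *	else if (mem_mode)
 *		iter.ops = &hist_iter_mem;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *	else
 *		iter.ops = &hist_iter_normal;
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, rep);
 */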

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct perf_hpp_fmt *fmt)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = fmt->collapse(fmt, iter, he);
		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists__apply_filters(hists, new);
	hists->nr_entries++;

	/* save related format for output */
	new->fmt = fmt;

	/* some fields are now passed to 'new' */
	if (perf_hpp__is_trace_entry(fmt))
		he->trace_output = NULL;
	else
		new->trace_output = NULL;

	if (perf_hpp__is_srcline_entry(fmt))
		he->srcline = NULL;
	else
		new->srcline = NULL;

	if (perf_hpp__is_srcfile_entry(fmt))
		he->srcfile = NULL;
	else
		new->srcfile = NULL;

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_fmt *fmt;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (!perf_hpp__is_sort_entry(fmt) &&
		    !perf_hpp__is_dynamic_entry(fmt))
			continue;
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, fmt);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->parent_he = parent;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
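
/*
 * Hierarchy example (illustrative): with 'perf report --hierarchy
 * -s comm,dso,sym', each collapsed input entry is split into one
 * hist_entry per sort key, linked via parent_he/hroot_in:
 *
 *	firefox			(comm, depth 0)
 *	    libxul.so		(dso,  depth 1)
 *	        some_symbol	(sym,  depth 2, leaf)
 *
 * Periods are accumulated at every level on the way down, so a parent's
 * period equals the sum of its children's.
 */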

int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
				 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
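
/*
 * entries_in_array[] is double buffered: the rotation above hands the
 * current input tree to the collapse pass and redirects new insertions
 * to the other tree, so a live session (e.g. 'perf top') can keep adding
 * samples while a resort is in progress.
 */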

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!sort__need_collapse)
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			hists->nr_entries++;
			if (!he->filtered) {
				hists->nr_non_filtered_entries++;
				hists__calc_col_len(hists, he);
			}

			continue;
		}

		/* only update stat for leaf entries to avoid duplication */
		hists__inc_stats(hists, he);
		if (!he->filtered)
			hists__calc_col_len(hists, he);

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
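
/*
 * Period stats are only accumulated at the leaves above because every
 * level of the hierarchy carries the same samples: a parent's period
 * already equals the sum over its children, so counting inner nodes as
 * well would inflate total_period once per level.
 */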

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		return hists__hierarchy_output_resort(hists, prog,
						      &hists->entries_collapsed,
						      &hists->entries,
						      min_callchain_hits,
						      use_callchain);
	}

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}
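
/*
 * The helpers above walk the hierarchy in display (pre-order) order:
 * HMD_NORMAL descends only into unfolded entries (i.e. what the browser
 * shows), HMD_FORCE_CHILD always descends, and HMD_FORCE_SIBLING skips
 * over a whole subtree.  The hierarchy filtering code below relies on
 * the last two to steer the walk.
 */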

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
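
/*
 * In hierarchy mode the loop above re-adds the unfiltered entry's stats
 * to each ancestor because hists__filter_hierarchy() (below) zeroes the
 * stats of every entry whose sort key cannot answer the filter (case 1
 * there): the parents' periods are thus rebuilt from whichever
 * descendants survive the filter.
 */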

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}
}
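
/*
 * Worked example for the walk above (illustrative): take a comm/dso/sym
 * hierarchy and a DSO filter.  A comm-level entry cannot answer a DSO
 * filter, so hist_entry__filter() returns < 0: its stats are zeroed and
 * the walk descends to its children (case 1).  A dso-level entry that
 * doesn't match returns 1 and its whole subtree is skipped (case 2).
 * A matching dso entry returns 0: hists__remove_entry_filter() adds its
 * stats back into the zeroed comm ancestor (case 3), so parents end up
 * showing only the filtered-in periods.
 */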

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}