tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

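/*
 * Column width bookkeeping: each struct hists remembers the widest
 * string seen so far for every output column, so the display code can
 * keep the report aligned.
 */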
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

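/*
 * Grow the column widths to fit a single entry, covering the symbol,
 * dso, comm, parent, branch, memory and srcline/srcfile fields.
 */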
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

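/*
 * Account a period to the kernel/user (and guest variant) buckets
 * according to the cpumode the sample was taken in.
 */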
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

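/*
 * Age all entries (used by live displays such as 'perf top'): each pass
 * scales periods down by 7/8 via hists__decay_entry() and removes
 * entries that decayed to zero or match the zap flags.
 */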
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

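/*
 * Allocate a hist_entry from a stack-based template, taking references
 * on (or copies of) the map, branch info, mem info and thread it points
 * to, so the entry can outlive the sample that produced it.
 */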
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (part of an array) allocated by
			 * sample__resolve_bstack() and will be freed after the
			 * new entries are added, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

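/*
 * Find an existing entry that compares equal to the template in the
 * current input tree and accumulate into it, or insert a new one.
 */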
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

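/*
 * Main entry point for adding a sample: build a template hist_entry
 * from the resolved addr_location and hand it to hists__findnew_entry().
 */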
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

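/*
 * The iterator callbacks below implement the four flavours of entry
 * creation (mem, branch, normal and cumulative/--children), wired up
 * through the hist_iter_ops tables further down and driven by
 * hist_entry_iter__add().
 */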
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is solely
	 * based on periods. We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

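/*
 * Branch mode: one entry is added per branch stack slot, walking the
 * resolved branch_info array with iter->curr/iter->total.
 */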
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling the callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, bi->flags.cycles ? bi->flags.cycles : 1,
				0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

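/*
 * Cumulative (--children) mode: in addition to the self entry, one
 * entry is added for every callchain node so callers accumulate the
 * periods of their callees.
 */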
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

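/*
 * Drive a single sample through the iterator ops: resolve its
 * callchain, add the self entry, then add whatever extra entries the
 * mode provides, calling the optional add_entry_cb for each one.
 */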
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

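/*
 * The input tree is double buffered: swap entries_in between the two
 * slots of entries_in_array under the lock, so collapsing can consume
 * one tree while new entries keep going into the other.
 */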
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

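/*
 * Merge all input entries that compare equal under the collapse keys
 * into the entries_collapsed tree, combining their stats and
 * callchains on the way.
 */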
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

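/*
 * Output stage: move the collapsed entries into hists->entries, ordered
 * by the output sort keys, sorting callchains against the
 * min_callchain_hits threshold as we go.
 */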
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

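/*
 * Filtering: each filter sets its own bit in he->filtered and an entry
 * is displayed only when no bits are set. Clearing a bit folds the
 * entry and adds it back to the non-filtered stats and column widths.
 */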
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

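/*
 * Feed per-branch cycle counts into the annotation code so it can
 * compute cycles/IPC per address; branches are stored newest first,
 * hence the reverse walk.
 */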
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, since we still want to process
			 * the other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

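/*
 * Per-evsel hists setup: a struct hists is embedded after each
 * perf_evsel (see struct hists_evsel, whose size is registered via the
 * object config hook in hists__init()) and initialized here.
 */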
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}