tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

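/*
 * Illustrative note (not part of the original file): the three helpers
 * above implement a "widen only" policy.  hists__new_col_len() grows a
 * column when a wider value shows up but never shrinks it, so a column
 * ends up sized for the widest entry seen since the last reset.  A
 * hypothetical caller sizing a column for a string could look like this
 * sketch:
 */
#if 0
static void example__widen_comm_col(struct hists *hists, const char *comm)
{
	/* grows HISTC_COMM if needed; returns true only when it grew */
	hists__new_col_len(hists, HISTC_COMM, strlen(comm));
}
#endif
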
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

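/*
 * Worked example of the width arithmetic above (illustrative, assuming a
 * 64-bit build where BITS_PER_LONG / 4 == 16): a resolved symbol "main"
 * (namelen 4) needs 4 + 4 = 8 columns for "[.] main"; with -v it grows
 * by another 16 + 2 + 3 = 21 columns for the 0x-prefixed address and the
 * symtab origin marker.  An unresolved address always budgets
 * 16 + 4 + 2 = 22 columns.
 */
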
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

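/*
 * Illustrative arithmetic for the decay above: multiplying by 7/8 each
 * pass gives an exponential decay, e.g. a period of 1000 becomes 875,
 * then 765, then 669, and so on.  Integer division makes small values
 * eventually hit zero, which is what lets hists__decay_entries() below
 * garbage-collect stale entries in live modes such as `perf top`.
 */
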
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * The branch info is (part of) an array allocated by
			 * sample__resolve_bstack() that will be freed once
			 * the new entries have been added, so we need to
			 * save our own copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

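/*
 * Note on the allocation trick in hist_entry__new() above (an editorial
 * aside, not from the original file): the callchain root lives in a
 * variable-size tail right behind the hist_entry, so a single zalloc()
 * of sizeof(*he) + callchain_size covers both, and the tail is simply
 * absent when callchains are disabled.  Everything the template points
 * at (map, thread, branch_info, mem_info) is either reference-counted
 * via the __get() helpers or deep-copied, so the new entry owns all of
 * its references and hist_entry__delete() can drop them uniformly.
 */
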
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

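/*
 * hists__findnew_entry() above is the classic rbtree find-or-insert
 * walk: descend comparing with hist_entry__cmp(), merge the period into
 * an existing node on a match, otherwise link a new node at the leaf
 * position where the walk ended (rb_link_node() + rb_insert_color()).
 * The `parent` and `p` bookkeeping during the descent is what makes the
 * final insertion O(1).
 */
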
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

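/*
 * Minimal usage sketch for __hists__add_entry() (illustrative only; the
 * function name below is hypothetical, and this mirrors what
 * iter_add_single_normal_entry() further down does for an ordinary
 * sample, assuming `al` and `sample` were already resolved):
 */
#if 0
static int example__add_sample(struct hists *hists, struct addr_location *al,
			       struct perf_sample *sample)
{
	struct hist_entry *he;

	he = __hists__add_entry(hists, al, NULL, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	return he ? 0 : -ENOMEM;
}
#endif
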
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and via the
	 * he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling the callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, bi->flags.cycles ? bi->flags.cycles : 1,
				0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

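/*
 * The branch iterator above adds one hist entry per branch record rather
 * than one per sample: add_single_entry resets iter->he so no callback
 * fires for the sample itself, next_entry points `al` at the branch
 * target, and add_next_entry accounts each branch with a pseudo period
 * of 1 (weighted by cycles when available), so the report shows the
 * share of branches captured rather than of events sampled.
 */
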
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from adding up to
	 * more than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

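/*
 * The four ops tables above plug into hist_entry_iter__add() below:
 * prepare_entry runs once per sample, add_single_entry adds the sample
 * itself, then next_entry/add_next_entry loop over any derived entries
 * (individual branches, callchain parents), and finish_entry releases
 * the iterator state.  Returning 0 from next_entry terminates the loop.
 */
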
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

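/*
 * Illustrative driver for the iterator (a sketch of how a report-style
 * tool might feed one resolved sample into the histogram; the function
 * name is hypothetical, the ops tables are the ones defined above, and
 * PERF_MAX_STACK_DEPTH is assumed to come from the perf_event headers):
 */
#if 0
static int example__process_sample(struct perf_evsel *evsel,
				   struct perf_sample *sample,
				   struct addr_location *al)
{
	struct hist_entry_iter iter = {
		.evsel	= evsel,
		.sample	= sample,
		.ops	= &hist_iter_normal,	/* or hist_iter_branch, ... */
	};

	/* resolves the callchain, then walks the ops table */
	return hist_entry_iter__add(&iter, al, PERF_MAX_STACK_DEPTH, NULL);
}
#endif
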
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

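/*
 * Note on hists__get_rotate_entries_in() above: incoming entries land in
 * one of two input trees (entries_in_array[0]/[1]).  Under the lock the
 * current tree is handed to the collapser and the other tree becomes the
 * live input, so new samples can keep arriving while
 * hists__collapse_resort() below drains the snapshot, e.g. while
 * `perf top` refreshes its display.
 */
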
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

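/*
 * Sketch of the typical two-phase resort pipeline (illustrative; the
 * hypothetical wrapper below just shows the order in which the perf
 * tools generally call the two functions after processing samples):
 */
#if 0
static void example__resort(struct hists *hists)
{
	/* 1) merge the input tree(s) on the collapsed sort keys */
	hists__collapse_resort(hists, NULL);
	/* 2) build the output tree ordered by the display sort keys */
	hists__output_resort(hists, NULL);
}
#endif
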
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

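/*
 * Filter mechanics: each hists__filter_entry_by_*() helper below sets
 * one bit in he->filtered, and hists__remove_entry_filter() above clears
 * that bit.  An entry only re-enters the non-filtered stats once *all*
 * bits are clear, which is why the function bails out early while
 * h->filtered is still non-zero.
 */
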
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

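/*
 * hists__match()/hists__link() above are the building blocks for
 * comparing two histograms (this is, for instance, how tools like
 * `perf diff` pair up entries): match links entries present in both,
 * while link adds zeroed dummy entries to the leader for anything only
 * the other hists has, so every output position has a pair to print.
 */
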
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, since we still want to process
			 * the other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

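/*
 * Usage sketch for the percentage knobs above (illustrative; the wrapper
 * name is hypothetical): the same parser backs both the command line
 * --percentage option and, presumably via the perfconfig machinery, the
 * hist.percentage config key, so both paths end up setting
 * symbol_conf.filter_relative:
 */
#if 0
static void example__set_percentage(void)
{
	parse_filter_percentage(NULL, "relative", 0);	 /* option path */
	perf_hist_config("hist.percentage", "absolute"); /* config path */
}
#endif
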
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}