/*
 * tools/perf/util/hist.c
 *
 * perf tools: histogram management.
 * (From the series introducing the new "socket" sort type for the
 * processor socket.)
 */
8a0ecfb8 1#include "util.h"
598357eb 2#include "build-id.h"
3d1d07ec 3#include "hist.h"
4e4f06e4
ACM
4#include "session.h"
5#include "sort.h"
2a1731fb 6#include "evlist.h"
29d720ed 7#include "evsel.h"
69bcb019 8#include "annotate.h"
740b97f9 9#include "ui/progress.h"
9b33827d 10#include <math.h>
3d1d07ec 11
90cf1fb5
ACM
12static bool hists__filter_entry_by_dso(struct hists *hists,
13 struct hist_entry *he);
14static bool hists__filter_entry_by_thread(struct hists *hists,
15 struct hist_entry *he);
e94d53eb
NK
16static bool hists__filter_entry_by_symbol(struct hists *hists,
17 struct hist_entry *he);
90cf1fb5 18
42b28ac0 19u16 hists__col_len(struct hists *hists, enum hist_column col)
8a6c5b26 20{
42b28ac0 21 return hists->col_len[col];
8a6c5b26
ACM
22}
23
42b28ac0 24void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 25{
42b28ac0 26 hists->col_len[col] = len;
8a6c5b26
ACM
27}
28
42b28ac0 29bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 30{
42b28ac0
ACM
31 if (len > hists__col_len(hists, col)) {
32 hists__set_col_len(hists, col, len);
8a6c5b26
ACM
33 return true;
34 }
35 return false;
36}
37
7ccf4f90 38void hists__reset_col_len(struct hists *hists)
8a6c5b26
ACM
39{
40 enum hist_column col;
41
42 for (col = 0; col < HISTC_NR_COLS; ++col)
42b28ac0 43 hists__set_col_len(hists, col, 0);
8a6c5b26
ACM
44}
45
b5387528
RAV
46static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
47{
48 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
49
50 if (hists__col_len(hists, dso) < unresolved_col_width &&
51 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
52 !symbol_conf.dso_list)
53 hists__set_col_len(hists, dso, unresolved_col_width);
54}
55
7ccf4f90 56void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
8a6c5b26 57{
b5387528 58 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
98a3b32c 59 int symlen;
8a6c5b26
ACM
60 u16 len;
61
ded19d57
NK
62 /*
63 * +4 accounts for '[x] ' priv level info
64 * +2 accounts for 0x prefix on raw addresses
65 * +3 accounts for ' y ' symtab origin info
66 */
67 if (h->ms.sym) {
68 symlen = h->ms.sym->namelen + 4;
69 if (verbose)
70 symlen += BITS_PER_LONG / 4 + 2 + 3;
71 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
72 } else {
98a3b32c
SE
73 symlen = unresolved_col_width + 4 + 2;
74 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
b5387528 75 hists__set_unres_dso_col_len(hists, HISTC_DSO);
98a3b32c 76 }
8a6c5b26
ACM
77
78 len = thread__comm_len(h->thread);
42b28ac0
ACM
79 if (hists__new_col_len(hists, HISTC_COMM, len))
80 hists__set_col_len(hists, HISTC_THREAD, len + 6);
8a6c5b26
ACM
81
82 if (h->ms.map) {
83 len = dso__name_len(h->ms.map->dso);
42b28ac0 84 hists__new_col_len(hists, HISTC_DSO, len);
8a6c5b26 85 }
b5387528 86
cb993744
NK
87 if (h->parent)
88 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
89
b5387528 90 if (h->branch_info) {
b5387528
RAV
91 if (h->branch_info->from.sym) {
92 symlen = (int)h->branch_info->from.sym->namelen + 4;
ded19d57
NK
93 if (verbose)
94 symlen += BITS_PER_LONG / 4 + 2 + 3;
b5387528
RAV
95 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
96
97 symlen = dso__name_len(h->branch_info->from.map->dso);
98 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
99 } else {
100 symlen = unresolved_col_width + 4 + 2;
101 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
102 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
103 }
104
105 if (h->branch_info->to.sym) {
106 symlen = (int)h->branch_info->to.sym->namelen + 4;
ded19d57
NK
107 if (verbose)
108 symlen += BITS_PER_LONG / 4 + 2 + 3;
b5387528
RAV
109 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
110
111 symlen = dso__name_len(h->branch_info->to.map->dso);
112 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
113 } else {
114 symlen = unresolved_col_width + 4 + 2;
115 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
116 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
117 }
118 }
98a3b32c
SE
119
120 if (h->mem_info) {
98a3b32c
SE
121 if (h->mem_info->daddr.sym) {
122 symlen = (int)h->mem_info->daddr.sym->namelen + 4
123 + unresolved_col_width + 2;
124 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
125 symlen);
9b32ba71
DZ
126 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
127 symlen + 1);
98a3b32c
SE
128 } else {
129 symlen = unresolved_col_width + 4 + 2;
130 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
131 symlen);
132 }
133 if (h->mem_info->daddr.map) {
134 symlen = dso__name_len(h->mem_info->daddr.map->dso);
135 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
136 symlen);
137 } else {
138 symlen = unresolved_col_width + 4 + 2;
139 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
140 }
141 } else {
142 symlen = unresolved_col_width + 4 + 2;
143 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
144 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 }
146
a4978eca 147 hists__new_col_len(hists, HISTC_CPU, 3);
2e7ea3ab 148 hists__new_col_len(hists, HISTC_SOCKET, 6);
98a3b32c
SE
149 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
150 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
151 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
152 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
153 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
154 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
475eeab9 155
e8e6d37e
ACM
156 if (h->srcline)
157 hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
158
31191a85
AK
159 if (h->srcfile)
160 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
161
475eeab9
AK
162 if (h->transaction)
163 hists__new_col_len(hists, HISTC_TRANSACTION,
164 hist_entry__transaction_len());
8a6c5b26
ACM
165}
166
7ccf4f90
NK
167void hists__output_recalc_col_len(struct hists *hists, int max_rows)
168{
169 struct rb_node *next = rb_first(&hists->entries);
170 struct hist_entry *n;
171 int row = 0;
172
173 hists__reset_col_len(hists);
174
175 while (next && row++ < max_rows) {
176 n = rb_entry(next, struct hist_entry, rb_node);
177 if (!n->filtered)
178 hists__calc_col_len(hists, n);
179 next = rb_next(&n->rb_node);
180 }
181}
182
f39056f9
NK
183static void he_stat__add_cpumode_period(struct he_stat *he_stat,
184 unsigned int cpumode, u64 period)
a1645ce1 185{
28e2a106 186 switch (cpumode) {
a1645ce1 187 case PERF_RECORD_MISC_KERNEL:
f39056f9 188 he_stat->period_sys += period;
a1645ce1
ZY
189 break;
190 case PERF_RECORD_MISC_USER:
f39056f9 191 he_stat->period_us += period;
a1645ce1
ZY
192 break;
193 case PERF_RECORD_MISC_GUEST_KERNEL:
f39056f9 194 he_stat->period_guest_sys += period;
a1645ce1
ZY
195 break;
196 case PERF_RECORD_MISC_GUEST_USER:
f39056f9 197 he_stat->period_guest_us += period;
a1645ce1
ZY
198 break;
199 default:
200 break;
201 }
202}
203
05484298
AK
204static void he_stat__add_period(struct he_stat *he_stat, u64 period,
205 u64 weight)
139c0815 206{
98a3b32c 207
139c0815 208 he_stat->period += period;
05484298 209 he_stat->weight += weight;
139c0815
NK
210 he_stat->nr_events += 1;
211}
212
213static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
214{
215 dest->period += src->period;
216 dest->period_sys += src->period_sys;
217 dest->period_us += src->period_us;
218 dest->period_guest_sys += src->period_guest_sys;
219 dest->period_guest_us += src->period_guest_us;
220 dest->nr_events += src->nr_events;
05484298 221 dest->weight += src->weight;
139c0815
NK
222}
223
f39056f9 224static void he_stat__decay(struct he_stat *he_stat)
ab81f3fd 225{
f39056f9
NK
226 he_stat->period = (he_stat->period * 7) / 8;
227 he_stat->nr_events = (he_stat->nr_events * 7) / 8;
05484298 228 /* XXX need decay for weight too? */
ab81f3fd
ACM
229}
230
231static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
232{
b24c28f7 233 u64 prev_period = he->stat.period;
3186b681 234 u64 diff;
c64550cf
ACM
235
236 if (prev_period == 0)
df71d95f 237 return true;
c64550cf 238
f39056f9 239 he_stat__decay(&he->stat);
f8be1c8c
NK
240 if (symbol_conf.cumulate_callchain)
241 he_stat__decay(he->stat_acc);
c64550cf 242
3186b681
NK
243 diff = prev_period - he->stat.period;
244
245 hists->stats.total_period -= diff;
c64550cf 246 if (!he->filtered)
3186b681 247 hists->stats.total_non_filtered_period -= diff;
c64550cf 248
b24c28f7 249 return he->stat.period == 0;
ab81f3fd
ACM
250}
251
956b65e1
ACM
252static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
253{
254 rb_erase(&he->rb_node, &hists->entries);
255
256 if (sort__need_collapse)
257 rb_erase(&he->rb_node_in, &hists->entries_collapsed);
258
259 --hists->nr_entries;
260 if (!he->filtered)
261 --hists->nr_non_filtered_entries;
262
263 hist_entry__delete(he);
264}
265
3a5714f8 266void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
ab81f3fd
ACM
267{
268 struct rb_node *next = rb_first(&hists->entries);
269 struct hist_entry *n;
270
271 while (next) {
272 n = rb_entry(next, struct hist_entry, rb_node);
273 next = rb_next(&n->rb_node);
b079d4e9
ACM
274 if (((zap_user && n->level == '.') ||
275 (zap_kernel && n->level != '.') ||
4c47f4fc 276 hists__decay_entry(hists, n))) {
956b65e1 277 hists__delete_entry(hists, n);
ab81f3fd
ACM
278 }
279 }
280}
281
701937bd
NK
282void hists__delete_entries(struct hists *hists)
283{
284 struct rb_node *next = rb_first(&hists->entries);
285 struct hist_entry *n;
286
287 while (next) {
288 n = rb_entry(next, struct hist_entry, rb_node);
289 next = rb_next(&n->rb_node);
290
956b65e1 291 hists__delete_entry(hists, n);
701937bd
NK
292 }
293}
294
3d1d07ec 295/*
c82ee828 296 * histogram, sorted on item, collects periods
3d1d07ec
JK
297 */
298
a0b51af3
NK
299static struct hist_entry *hist_entry__new(struct hist_entry *template,
300 bool sample_self)
28e2a106 301{
f8be1c8c
NK
302 size_t callchain_size = 0;
303 struct hist_entry *he;
304
82aa019e 305 if (symbol_conf.use_callchain)
f8be1c8c
NK
306 callchain_size = sizeof(struct callchain_root);
307
308 he = zalloc(sizeof(*he) + callchain_size);
28e2a106 309
12c14278
ACM
310 if (he != NULL) {
311 *he = *template;
c4b35351 312
f8be1c8c
NK
313 if (symbol_conf.cumulate_callchain) {
314 he->stat_acc = malloc(sizeof(he->stat));
315 if (he->stat_acc == NULL) {
316 free(he);
317 return NULL;
318 }
319 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
a0b51af3
NK
320 if (!sample_self)
321 memset(&he->stat, 0, sizeof(he->stat));
f8be1c8c
NK
322 }
323
5c24b67a 324 map__get(he->ms.map);
3cf0cb1f
SE
325
326 if (he->branch_info) {
26353a61
NK
327 /*
328 * This branch info is (a part of) allocated from
644f2df2 329 * sample__resolve_bstack() and will be freed after
26353a61
NK
330 * adding new entries. So we need to save a copy.
331 */
332 he->branch_info = malloc(sizeof(*he->branch_info));
333 if (he->branch_info == NULL) {
5c24b67a 334 map__zput(he->ms.map);
f8be1c8c 335 free(he->stat_acc);
26353a61
NK
336 free(he);
337 return NULL;
338 }
339
340 memcpy(he->branch_info, template->branch_info,
341 sizeof(*he->branch_info));
342
5c24b67a
ACM
343 map__get(he->branch_info->from.map);
344 map__get(he->branch_info->to.map);
3cf0cb1f
SE
345 }
346
98a3b32c 347 if (he->mem_info) {
5c24b67a
ACM
348 map__get(he->mem_info->iaddr.map);
349 map__get(he->mem_info->daddr.map);
98a3b32c
SE
350 }
351
28e2a106 352 if (symbol_conf.use_callchain)
12c14278 353 callchain_init(he->callchain);
b821c732
ACM
354
355 INIT_LIST_HEAD(&he->pairs.node);
f3b623b8 356 thread__get(he->thread);
28e2a106
ACM
357 }
358
12c14278 359 return he;
28e2a106
ACM
360}
361
7a007ca9
ACM
362static u8 symbol__parent_filter(const struct symbol *parent)
363{
364 if (symbol_conf.exclude_other && parent == NULL)
365 return 1 << HIST_FILTER__PARENT;
366 return 0;
367}
368
e7e0efcd
ACM
369static struct hist_entry *hists__findnew_entry(struct hists *hists,
370 struct hist_entry *entry,
371 struct addr_location *al,
372 bool sample_self)
9735abf1 373{
1980c2eb 374 struct rb_node **p;
9735abf1
ACM
375 struct rb_node *parent = NULL;
376 struct hist_entry *he;
354cc40e 377 int64_t cmp;
f1cbf78d
NK
378 u64 period = entry->stat.period;
379 u64 weight = entry->stat.weight;
9735abf1 380
1980c2eb
ACM
381 p = &hists->entries_in->rb_node;
382
9735abf1
ACM
383 while (*p != NULL) {
384 parent = *p;
1980c2eb 385 he = rb_entry(parent, struct hist_entry, rb_node_in);
9735abf1 386
9afcf930
NK
387 /*
388 * Make sure that it receives arguments in a same order as
389 * hist_entry__collapse() so that we can use an appropriate
390 * function when searching an entry regardless which sort
391 * keys were used.
392 */
393 cmp = hist_entry__cmp(he, entry);
9735abf1
ACM
394
395 if (!cmp) {
a0b51af3
NK
396 if (sample_self)
397 he_stat__add_period(&he->stat, period, weight);
f8be1c8c
NK
398 if (symbol_conf.cumulate_callchain)
399 he_stat__add_period(he->stat_acc, period, weight);
63fa471d 400
ceb2acbc 401 /*
e80faac0 402 * This mem info was allocated from sample__resolve_mem
ceb2acbc
NK
403 * and will not be used anymore.
404 */
74cf249d 405 zfree(&entry->mem_info);
ceb2acbc 406
63fa471d
DM
407 /* If the map of an existing hist_entry has
408 * become out-of-date due to an exec() or
409 * similar, update it. Otherwise we will
410 * mis-adjust symbol addresses when computing
411 * the history counter to increment.
412 */
413 if (he->ms.map != entry->ms.map) {
5c24b67a
ACM
414 map__put(he->ms.map);
415 he->ms.map = map__get(entry->ms.map);
63fa471d 416 }
28e2a106 417 goto out;
9735abf1
ACM
418 }
419
420 if (cmp < 0)
421 p = &(*p)->rb_left;
422 else
423 p = &(*p)->rb_right;
424 }
425
a0b51af3 426 he = hist_entry__new(entry, sample_self);
9735abf1 427 if (!he)
27a0dcb7 428 return NULL;
1980c2eb 429
590cd344
NK
430 hists->nr_entries++;
431
1980c2eb
ACM
432 rb_link_node(&he->rb_node_in, parent, p);
433 rb_insert_color(&he->rb_node_in, hists->entries_in);
28e2a106 434out:
a0b51af3
NK
435 if (sample_self)
436 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
f8be1c8c
NK
437 if (symbol_conf.cumulate_callchain)
438 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
9735abf1
ACM
439 return he;
440}
441
c824c433 442struct hist_entry *__hists__add_entry(struct hists *hists,
b5387528 443 struct addr_location *al,
41a4e6e2
NK
444 struct symbol *sym_parent,
445 struct branch_info *bi,
446 struct mem_info *mi,
a0b51af3
NK
447 u64 period, u64 weight, u64 transaction,
448 bool sample_self)
b5387528
RAV
449{
450 struct hist_entry entry = {
451 .thread = al->thread,
4dfced35 452 .comm = thread__comm(al->thread),
b5387528
RAV
453 .ms = {
454 .map = al->map,
455 .sym = al->sym,
456 },
0c4c4deb 457 .socket = al->socket,
7365be55
DZ
458 .cpu = al->cpu,
459 .cpumode = al->cpumode,
460 .ip = al->addr,
461 .level = al->level,
b24c28f7 462 .stat = {
c4b35351 463 .nr_events = 1,
41a4e6e2 464 .period = period,
05484298 465 .weight = weight,
b24c28f7 466 },
b5387528 467 .parent = sym_parent,
2c86c7ca 468 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
c824c433 469 .hists = hists,
41a4e6e2
NK
470 .branch_info = bi,
471 .mem_info = mi,
475eeab9 472 .transaction = transaction,
b5387528
RAV
473 };
474
e7e0efcd 475 return hists__findnew_entry(hists, &entry, al, sample_self);
b5387528
RAV
476}
477
69bcb019
NK
478static int
479iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
480 struct addr_location *al __maybe_unused)
481{
482 return 0;
483}
484
485static int
486iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
487 struct addr_location *al __maybe_unused)
488{
489 return 0;
490}
491
492static int
493iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
494{
495 struct perf_sample *sample = iter->sample;
496 struct mem_info *mi;
497
498 mi = sample__resolve_mem(sample, al);
499 if (mi == NULL)
500 return -ENOMEM;
501
502 iter->priv = mi;
503 return 0;
504}
505
506static int
507iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
508{
509 u64 cost;
510 struct mem_info *mi = iter->priv;
4ea062ed 511 struct hists *hists = evsel__hists(iter->evsel);
69bcb019
NK
512 struct hist_entry *he;
513
514 if (mi == NULL)
515 return -EINVAL;
516
517 cost = iter->sample->weight;
518 if (!cost)
519 cost = 1;
520
521 /*
522 * must pass period=weight in order to get the correct
523 * sorting from hists__collapse_resort() which is solely
524 * based on periods. We want sorting be done on nr_events * weight
525 * and this is indirectly achieved by passing period=weight here
526 * and the he_stat__add_period() function.
527 */
4ea062ed 528 he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
a0b51af3 529 cost, cost, 0, true);
69bcb019
NK
530 if (!he)
531 return -ENOMEM;
532
533 iter->he = he;
534 return 0;
535}
536
537static int
9d3c02d7
NK
538iter_finish_mem_entry(struct hist_entry_iter *iter,
539 struct addr_location *al __maybe_unused)
69bcb019
NK
540{
541 struct perf_evsel *evsel = iter->evsel;
4ea062ed 542 struct hists *hists = evsel__hists(evsel);
69bcb019 543 struct hist_entry *he = iter->he;
69bcb019
NK
544 int err = -EINVAL;
545
546 if (he == NULL)
547 goto out;
548
4ea062ed 549 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
550
551 err = hist_entry__append_callchain(he, iter->sample);
552
553out:
554 /*
e7e0efcd
ACM
555 * We don't need to free iter->priv (mem_info) here since the mem info
556 * was either already freed in hists__findnew_entry() or passed to a
557 * new hist entry by hist_entry__new().
69bcb019
NK
558 */
559 iter->priv = NULL;
560
561 iter->he = NULL;
562 return err;
563}
564
565static int
566iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
567{
568 struct branch_info *bi;
569 struct perf_sample *sample = iter->sample;
570
571 bi = sample__resolve_bstack(sample, al);
572 if (!bi)
573 return -ENOMEM;
574
575 iter->curr = 0;
576 iter->total = sample->branch_stack->nr;
577
578 iter->priv = bi;
579 return 0;
580}
581
582static int
583iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
584 struct addr_location *al __maybe_unused)
585{
9d3c02d7
NK
586 /* to avoid calling callback function */
587 iter->he = NULL;
588
69bcb019
NK
589 return 0;
590}
591
592static int
593iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
594{
595 struct branch_info *bi = iter->priv;
596 int i = iter->curr;
597
598 if (bi == NULL)
599 return 0;
600
601 if (iter->curr >= iter->total)
602 return 0;
603
604 al->map = bi[i].to.map;
605 al->sym = bi[i].to.sym;
606 al->addr = bi[i].to.addr;
607 return 1;
608}
609
610static int
611iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
612{
9d3c02d7 613 struct branch_info *bi;
69bcb019 614 struct perf_evsel *evsel = iter->evsel;
4ea062ed 615 struct hists *hists = evsel__hists(evsel);
69bcb019
NK
616 struct hist_entry *he = NULL;
617 int i = iter->curr;
618 int err = 0;
619
620 bi = iter->priv;
621
622 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
623 goto out;
624
625 /*
626 * The report shows the percentage of total branches captured
627 * and not events sampled. Thus we use a pseudo period of 1.
628 */
4ea062ed 629 he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
0e332f03
AK
630 1, bi->flags.cycles ? bi->flags.cycles : 1,
631 0, true);
69bcb019
NK
632 if (he == NULL)
633 return -ENOMEM;
634
4ea062ed 635 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
636
637out:
638 iter->he = he;
639 iter->curr++;
640 return err;
641}
642
643static int
644iter_finish_branch_entry(struct hist_entry_iter *iter,
645 struct addr_location *al __maybe_unused)
646{
647 zfree(&iter->priv);
648 iter->he = NULL;
649
650 return iter->curr >= iter->total ? 0 : -1;
651}
652
653static int
654iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
655 struct addr_location *al __maybe_unused)
656{
657 return 0;
658}
659
660static int
661iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
662{
663 struct perf_evsel *evsel = iter->evsel;
664 struct perf_sample *sample = iter->sample;
665 struct hist_entry *he;
666
4ea062ed 667 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
69bcb019 668 sample->period, sample->weight,
a0b51af3 669 sample->transaction, true);
69bcb019
NK
670 if (he == NULL)
671 return -ENOMEM;
672
673 iter->he = he;
674 return 0;
675}
676
677static int
9d3c02d7
NK
678iter_finish_normal_entry(struct hist_entry_iter *iter,
679 struct addr_location *al __maybe_unused)
69bcb019 680{
69bcb019
NK
681 struct hist_entry *he = iter->he;
682 struct perf_evsel *evsel = iter->evsel;
683 struct perf_sample *sample = iter->sample;
684
685 if (he == NULL)
686 return 0;
687
688 iter->he = NULL;
689
4ea062ed 690 hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
69bcb019
NK
691
692 return hist_entry__append_callchain(he, sample);
693}
694
7a13aa28
NK
695static int
696iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
697 struct addr_location *al __maybe_unused)
698{
b4d3c8bd
NK
699 struct hist_entry **he_cache;
700
7a13aa28 701 callchain_cursor_commit(&callchain_cursor);
b4d3c8bd
NK
702
703 /*
704 * This is for detecting cycles or recursions so that they're
705 * cumulated only one time to prevent entries more than 100%
706 * overhead.
707 */
708 he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
709 if (he_cache == NULL)
710 return -ENOMEM;
711
712 iter->priv = he_cache;
713 iter->curr = 0;
714
7a13aa28
NK
715 return 0;
716}
717
718static int
719iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
720 struct addr_location *al)
721{
722 struct perf_evsel *evsel = iter->evsel;
4ea062ed 723 struct hists *hists = evsel__hists(evsel);
7a13aa28 724 struct perf_sample *sample = iter->sample;
b4d3c8bd 725 struct hist_entry **he_cache = iter->priv;
7a13aa28
NK
726 struct hist_entry *he;
727 int err = 0;
728
4ea062ed 729 he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
7a13aa28
NK
730 sample->period, sample->weight,
731 sample->transaction, true);
732 if (he == NULL)
733 return -ENOMEM;
734
735 iter->he = he;
b4d3c8bd 736 he_cache[iter->curr++] = he;
7a13aa28 737
82aa019e 738 hist_entry__append_callchain(he, sample);
be7f855a
NK
739
740 /*
741 * We need to re-initialize the cursor since callchain_append()
742 * advanced the cursor to the end.
743 */
744 callchain_cursor_commit(&callchain_cursor);
745
4ea062ed 746 hists__inc_nr_samples(hists, he->filtered);
7a13aa28
NK
747
748 return err;
749}
750
751static int
752iter_next_cumulative_entry(struct hist_entry_iter *iter,
753 struct addr_location *al)
754{
755 struct callchain_cursor_node *node;
756
757 node = callchain_cursor_current(&callchain_cursor);
758 if (node == NULL)
759 return 0;
760
c7405d85 761 return fill_callchain_info(al, node, iter->hide_unresolved);
7a13aa28
NK
762}
763
764static int
765iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
766 struct addr_location *al)
767{
768 struct perf_evsel *evsel = iter->evsel;
769 struct perf_sample *sample = iter->sample;
b4d3c8bd 770 struct hist_entry **he_cache = iter->priv;
7a13aa28 771 struct hist_entry *he;
b4d3c8bd 772 struct hist_entry he_tmp = {
5cef8976 773 .hists = evsel__hists(evsel),
b4d3c8bd
NK
774 .cpu = al->cpu,
775 .thread = al->thread,
776 .comm = thread__comm(al->thread),
777 .ip = al->addr,
778 .ms = {
779 .map = al->map,
780 .sym = al->sym,
781 },
782 .parent = iter->parent,
783 };
784 int i;
be7f855a
NK
785 struct callchain_cursor cursor;
786
787 callchain_cursor_snapshot(&cursor, &callchain_cursor);
788
789 callchain_cursor_advance(&callchain_cursor);
b4d3c8bd
NK
790
791 /*
792 * Check if there's duplicate entries in the callchain.
793 * It's possible that it has cycles or recursive calls.
794 */
795 for (i = 0; i < iter->curr; i++) {
9d3c02d7
NK
796 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
797 /* to avoid calling callback function */
798 iter->he = NULL;
b4d3c8bd 799 return 0;
9d3c02d7 800 }
b4d3c8bd 801 }
7a13aa28 802
4ea062ed 803 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
7a13aa28
NK
804 sample->period, sample->weight,
805 sample->transaction, false);
806 if (he == NULL)
807 return -ENOMEM;
808
809 iter->he = he;
b4d3c8bd 810 he_cache[iter->curr++] = he;
7a13aa28 811
82aa019e
NK
812 if (symbol_conf.use_callchain)
813 callchain_append(he->callchain, &cursor, sample->period);
7a13aa28
NK
814 return 0;
815}
816
817static int
818iter_finish_cumulative_entry(struct hist_entry_iter *iter,
819 struct addr_location *al __maybe_unused)
820{
b4d3c8bd 821 zfree(&iter->priv);
7a13aa28 822 iter->he = NULL;
b4d3c8bd 823
7a13aa28
NK
824 return 0;
825}
826
69bcb019
NK
827const struct hist_iter_ops hist_iter_mem = {
828 .prepare_entry = iter_prepare_mem_entry,
829 .add_single_entry = iter_add_single_mem_entry,
830 .next_entry = iter_next_nop_entry,
831 .add_next_entry = iter_add_next_nop_entry,
832 .finish_entry = iter_finish_mem_entry,
833};
834
835const struct hist_iter_ops hist_iter_branch = {
836 .prepare_entry = iter_prepare_branch_entry,
837 .add_single_entry = iter_add_single_branch_entry,
838 .next_entry = iter_next_branch_entry,
839 .add_next_entry = iter_add_next_branch_entry,
840 .finish_entry = iter_finish_branch_entry,
841};
842
843const struct hist_iter_ops hist_iter_normal = {
844 .prepare_entry = iter_prepare_normal_entry,
845 .add_single_entry = iter_add_single_normal_entry,
846 .next_entry = iter_next_nop_entry,
847 .add_next_entry = iter_add_next_nop_entry,
848 .finish_entry = iter_finish_normal_entry,
849};
850
7a13aa28
NK
851const struct hist_iter_ops hist_iter_cumulative = {
852 .prepare_entry = iter_prepare_cumulative_entry,
853 .add_single_entry = iter_add_single_cumulative_entry,
854 .next_entry = iter_next_cumulative_entry,
855 .add_next_entry = iter_add_next_cumulative_entry,
856 .finish_entry = iter_finish_cumulative_entry,
857};
858
69bcb019 859int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
9d3c02d7 860 int max_stack_depth, void *arg)
69bcb019
NK
861{
862 int err, err2;
863
063bd936
NK
864 err = sample__resolve_callchain(iter->sample, &iter->parent,
865 iter->evsel, al, max_stack_depth);
69bcb019
NK
866 if (err)
867 return err;
868
69bcb019
NK
869 err = iter->ops->prepare_entry(iter, al);
870 if (err)
871 goto out;
872
873 err = iter->ops->add_single_entry(iter, al);
874 if (err)
875 goto out;
876
9d3c02d7
NK
877 if (iter->he && iter->add_entry_cb) {
878 err = iter->add_entry_cb(iter, al, true, arg);
879 if (err)
880 goto out;
881 }
882
69bcb019
NK
883 while (iter->ops->next_entry(iter, al)) {
884 err = iter->ops->add_next_entry(iter, al);
885 if (err)
886 break;
9d3c02d7
NK
887
888 if (iter->he && iter->add_entry_cb) {
889 err = iter->add_entry_cb(iter, al, false, arg);
890 if (err)
891 goto out;
892 }
69bcb019
NK
893 }
894
895out:
896 err2 = iter->ops->finish_entry(iter, al);
897 if (!err)
898 err = err2;
899
900 return err;
901}
902
3d1d07ec
JK
903int64_t
904hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
905{
093f0ef3 906 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
907 int64_t cmp = 0;
908
093f0ef3 909 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
910 if (perf_hpp__should_skip(fmt))
911 continue;
912
87bbdf76 913 cmp = fmt->cmp(fmt, left, right);
3d1d07ec
JK
914 if (cmp)
915 break;
916 }
917
918 return cmp;
919}
920
921int64_t
922hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
923{
093f0ef3 924 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
925 int64_t cmp = 0;
926
093f0ef3 927 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
928 if (perf_hpp__should_skip(fmt))
929 continue;
930
87bbdf76 931 cmp = fmt->collapse(fmt, left, right);
3d1d07ec
JK
932 if (cmp)
933 break;
934 }
935
936 return cmp;
937}
938
6733d1bf 939void hist_entry__delete(struct hist_entry *he)
3d1d07ec 940{
f3b623b8 941 thread__zput(he->thread);
5c24b67a
ACM
942 map__zput(he->ms.map);
943
944 if (he->branch_info) {
945 map__zput(he->branch_info->from.map);
946 map__zput(he->branch_info->to.map);
947 zfree(&he->branch_info);
948 }
949
950 if (he->mem_info) {
951 map__zput(he->mem_info->iaddr.map);
952 map__zput(he->mem_info->daddr.map);
953 zfree(&he->mem_info);
954 }
955
f8be1c8c 956 zfree(&he->stat_acc);
f048d548 957 free_srcline(he->srcline);
31191a85
AK
958 if (he->srcfile && he->srcfile[0])
959 free(he->srcfile);
d114960c 960 free_callchain(he->callchain);
3d1d07ec
JK
961 free(he);
962}
963
964/*
965 * collapse the histogram
966 */
967
1d037ca1 968static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
1b3a0e95
FW
969 struct rb_root *root,
970 struct hist_entry *he)
3d1d07ec 971{
b9bf0892 972 struct rb_node **p = &root->rb_node;
3d1d07ec
JK
973 struct rb_node *parent = NULL;
974 struct hist_entry *iter;
975 int64_t cmp;
976
977 while (*p != NULL) {
978 parent = *p;
1980c2eb 979 iter = rb_entry(parent, struct hist_entry, rb_node_in);
3d1d07ec
JK
980
981 cmp = hist_entry__collapse(iter, he);
982
983 if (!cmp) {
139c0815 984 he_stat__add_stat(&iter->stat, &he->stat);
f8be1c8c
NK
985 if (symbol_conf.cumulate_callchain)
986 he_stat__add_stat(iter->stat_acc, he->stat_acc);
9ec60972 987
1b3a0e95 988 if (symbol_conf.use_callchain) {
47260645
NK
989 callchain_cursor_reset(&callchain_cursor);
990 callchain_merge(&callchain_cursor,
991 iter->callchain,
1b3a0e95
FW
992 he->callchain);
993 }
6733d1bf 994 hist_entry__delete(he);
fefb0b94 995 return false;
3d1d07ec
JK
996 }
997
998 if (cmp < 0)
999 p = &(*p)->rb_left;
1000 else
1001 p = &(*p)->rb_right;
1002 }
740b97f9 1003 hists->nr_entries++;
3d1d07ec 1004
1980c2eb
ACM
1005 rb_link_node(&he->rb_node_in, parent, p);
1006 rb_insert_color(&he->rb_node_in, root);
fefb0b94 1007 return true;
3d1d07ec
JK
1008}
1009
1980c2eb 1010static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
3d1d07ec 1011{
1980c2eb
ACM
1012 struct rb_root *root;
1013
1014 pthread_mutex_lock(&hists->lock);
1015
1016 root = hists->entries_in;
1017 if (++hists->entries_in > &hists->entries_in_array[1])
1018 hists->entries_in = &hists->entries_in_array[0];
1019
1020 pthread_mutex_unlock(&hists->lock);
1021
1022 return root;
1023}
1024
/* Re-apply all active per-entry filters (dso, thread, symbol) to @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
1031
c1fb5651 1032void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1980c2eb
ACM
1033{
1034 struct rb_root *root;
3d1d07ec
JK
1035 struct rb_node *next;
1036 struct hist_entry *n;
1037
3a5714f8 1038 if (!sort__need_collapse)
3d1d07ec
JK
1039 return;
1040
740b97f9
NK
1041 hists->nr_entries = 0;
1042
1980c2eb 1043 root = hists__get_rotate_entries_in(hists);
740b97f9 1044
1980c2eb 1045 next = rb_first(root);
b9bf0892 1046
3d1d07ec 1047 while (next) {
33e940a2
ACM
1048 if (session_done())
1049 break;
1980c2eb
ACM
1050 n = rb_entry(next, struct hist_entry, rb_node_in);
1051 next = rb_next(&n->rb_node_in);
3d1d07ec 1052
1980c2eb 1053 rb_erase(&n->rb_node_in, root);
90cf1fb5
ACM
1054 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
1055 /*
1056 * If it wasn't combined with one of the entries already
1057 * collapsed, we need to apply the filters that may have
1058 * been set by, say, the hist_browser.
1059 */
1060 hists__apply_filters(hists, n);
90cf1fb5 1061 }
c1fb5651
NK
1062 if (prog)
1063 ui_progress__update(prog, 1);
3d1d07ec 1064 }
1980c2eb 1065}
b9bf0892 1066
043ca389 1067static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
29d720ed 1068{
043ca389
NK
1069 struct perf_hpp_fmt *fmt;
1070 int64_t cmp = 0;
29d720ed 1071
26d8b338 1072 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
1073 if (perf_hpp__should_skip(fmt))
1074 continue;
1075
87bbdf76 1076 cmp = fmt->sort(fmt, a, b);
043ca389 1077 if (cmp)
29d720ed
NK
1078 break;
1079 }
1080
043ca389 1081 return cmp;
29d720ed
NK
1082}
1083
9283ba9b
NK
1084static void hists__reset_filter_stats(struct hists *hists)
1085{
1086 hists->nr_non_filtered_entries = 0;
1087 hists->stats.total_non_filtered_period = 0;
1088}
1089
1090void hists__reset_stats(struct hists *hists)
1091{
1092 hists->nr_entries = 0;
1093 hists->stats.total_period = 0;
1094
1095 hists__reset_filter_stats(hists);
1096}
1097
1098static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1099{
1100 hists->nr_non_filtered_entries++;
1101 hists->stats.total_non_filtered_period += h->stat.period;
1102}
1103
1104void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1105{
1106 if (!h->filtered)
1107 hists__inc_filter_stats(hists, h);
1108
1109 hists->nr_entries++;
1110 hists->stats.total_period += h->stat.period;
1111}
1112
1c02c4d2
ACM
1113static void __hists__insert_output_entry(struct rb_root *entries,
1114 struct hist_entry *he,
f9db0d0f
KL
1115 u64 min_callchain_hits,
1116 bool use_callchain)
3d1d07ec 1117{
1c02c4d2 1118 struct rb_node **p = &entries->rb_node;
3d1d07ec
JK
1119 struct rb_node *parent = NULL;
1120 struct hist_entry *iter;
1121
f9db0d0f 1122 if (use_callchain)
b9fb9304 1123 callchain_param.sort(&he->sorted_chain, he->callchain,
3d1d07ec
JK
1124 min_callchain_hits, &callchain_param);
1125
1126 while (*p != NULL) {
1127 parent = *p;
1128 iter = rb_entry(parent, struct hist_entry, rb_node);
1129
043ca389 1130 if (hist_entry__sort(he, iter) > 0)
3d1d07ec
JK
1131 p = &(*p)->rb_left;
1132 else
1133 p = &(*p)->rb_right;
1134 }
1135
1136 rb_link_node(&he->rb_node, parent, p);
1c02c4d2 1137 rb_insert_color(&he->rb_node, entries);
3d1d07ec
JK
1138}
1139
740b97f9 1140void hists__output_resort(struct hists *hists, struct ui_progress *prog)
3d1d07ec 1141{
1980c2eb 1142 struct rb_root *root;
3d1d07ec
JK
1143 struct rb_node *next;
1144 struct hist_entry *n;
3d1d07ec 1145 u64 min_callchain_hits;
f9db0d0f 1146 struct perf_evsel *evsel = hists_to_evsel(hists);
9e207ddf
KL
1147 bool use_callchain;
1148
1149 if (evsel && !symbol_conf.show_ref_callgraph)
1150 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1151 else
1152 use_callchain = symbol_conf.use_callchain;
3d1d07ec 1153
42b28ac0 1154 min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
3d1d07ec 1155
3a5714f8 1156 if (sort__need_collapse)
1980c2eb
ACM
1157 root = &hists->entries_collapsed;
1158 else
1159 root = hists->entries_in;
1160
1161 next = rb_first(root);
1162 hists->entries = RB_ROOT;
3d1d07ec 1163
9283ba9b 1164 hists__reset_stats(hists);
42b28ac0 1165 hists__reset_col_len(hists);
fefb0b94 1166
3d1d07ec 1167 while (next) {
1980c2eb
ACM
1168 n = rb_entry(next, struct hist_entry, rb_node_in);
1169 next = rb_next(&n->rb_node_in);
3d1d07ec 1170
f9db0d0f 1171 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
6263835a 1172 hists__inc_stats(hists, n);
ae993efc
NK
1173
1174 if (!n->filtered)
1175 hists__calc_col_len(hists, n);
740b97f9
NK
1176
1177 if (prog)
1178 ui_progress__update(prog, 1);
3d1d07ec 1179 }
1980c2eb 1180}
b9bf0892 1181
42b28ac0 1182static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
cc5edb0e
ACM
1183 enum hist_filter filter)
1184{
1185 h->filtered &= ~(1 << filter);
1186 if (h->filtered)
1187 return;
1188
87e90f43 1189 /* force fold unfiltered entry for simplicity */
3698dab1 1190 h->unfolded = false;
0f0cbf7a 1191 h->row_offset = 0;
a8cd1f43 1192 h->nr_rows = 0;
9283ba9b 1193
1ab1fa5d 1194 hists->stats.nr_non_filtered_samples += h->stat.nr_events;
cc5edb0e 1195
9283ba9b 1196 hists__inc_filter_stats(hists, h);
42b28ac0 1197 hists__calc_col_len(hists, h);
cc5edb0e
ACM
1198}
1199
90cf1fb5
ACM
1200
1201static bool hists__filter_entry_by_dso(struct hists *hists,
1202 struct hist_entry *he)
1203{
1204 if (hists->dso_filter != NULL &&
1205 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1206 he->filtered |= (1 << HIST_FILTER__DSO);
1207 return true;
1208 }
1209
1210 return false;
1211}
1212
d7b76f09 1213void hists__filter_by_dso(struct hists *hists)
b09e0190
ACM
1214{
1215 struct rb_node *nd;
1216
1ab1fa5d 1217 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1218
1219 hists__reset_filter_stats(hists);
42b28ac0 1220 hists__reset_col_len(hists);
b09e0190 1221
42b28ac0 1222 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
b09e0190
ACM
1223 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1224
1225 if (symbol_conf.exclude_other && !h->parent)
1226 continue;
1227
90cf1fb5 1228 if (hists__filter_entry_by_dso(hists, h))
b09e0190 1229 continue;
b09e0190 1230
42b28ac0 1231 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
b09e0190
ACM
1232 }
1233}
1234
90cf1fb5
ACM
1235static bool hists__filter_entry_by_thread(struct hists *hists,
1236 struct hist_entry *he)
1237{
1238 if (hists->thread_filter != NULL &&
1239 he->thread != hists->thread_filter) {
1240 he->filtered |= (1 << HIST_FILTER__THREAD);
1241 return true;
1242 }
1243
1244 return false;
1245}
1246
d7b76f09 1247void hists__filter_by_thread(struct hists *hists)
b09e0190
ACM
1248{
1249 struct rb_node *nd;
1250
1ab1fa5d 1251 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1252
1253 hists__reset_filter_stats(hists);
42b28ac0 1254 hists__reset_col_len(hists);
b09e0190 1255
42b28ac0 1256 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
b09e0190
ACM
1257 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1258
90cf1fb5 1259 if (hists__filter_entry_by_thread(hists, h))
b09e0190 1260 continue;
cc5edb0e 1261
42b28ac0 1262 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
b09e0190
ACM
1263 }
1264}
ef7b93a1 1265
e94d53eb
NK
1266static bool hists__filter_entry_by_symbol(struct hists *hists,
1267 struct hist_entry *he)
1268{
1269 if (hists->symbol_filter_str != NULL &&
1270 (!he->ms.sym || strstr(he->ms.sym->name,
1271 hists->symbol_filter_str) == NULL)) {
1272 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1273 return true;
1274 }
1275
1276 return false;
1277}
1278
1279void hists__filter_by_symbol(struct hists *hists)
1280{
1281 struct rb_node *nd;
1282
1ab1fa5d 1283 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1284
1285 hists__reset_filter_stats(hists);
e94d53eb
NK
1286 hists__reset_col_len(hists);
1287
1288 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1289 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1290
1291 if (hists__filter_entry_by_symbol(hists, h))
1292 continue;
1293
1294 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
1295 }
1296}
1297
28a6b6aa
ACM
1298void events_stats__inc(struct events_stats *stats, u32 type)
1299{
1300 ++stats->nr_events[0];
1301 ++stats->nr_events[type];
1302}
1303
42b28ac0 1304void hists__inc_nr_events(struct hists *hists, u32 type)
c8446b9b 1305{
28a6b6aa 1306 events_stats__inc(&hists->stats, type);
c8446b9b 1307}
95529be4 1308
1844dbcb
NK
1309void hists__inc_nr_samples(struct hists *hists, bool filtered)
1310{
1311 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
1312 if (!filtered)
1313 hists->stats.nr_non_filtered_samples++;
1314}
1315
494d70a1
ACM
1316static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
1317 struct hist_entry *pair)
1318{
ce74f60e
NK
1319 struct rb_root *root;
1320 struct rb_node **p;
494d70a1
ACM
1321 struct rb_node *parent = NULL;
1322 struct hist_entry *he;
354cc40e 1323 int64_t cmp;
494d70a1 1324
ce74f60e
NK
1325 if (sort__need_collapse)
1326 root = &hists->entries_collapsed;
1327 else
1328 root = hists->entries_in;
1329
1330 p = &root->rb_node;
1331
494d70a1
ACM
1332 while (*p != NULL) {
1333 parent = *p;
ce74f60e 1334 he = rb_entry(parent, struct hist_entry, rb_node_in);
494d70a1 1335
ce74f60e 1336 cmp = hist_entry__collapse(he, pair);
494d70a1
ACM
1337
1338 if (!cmp)
1339 goto out;
1340
1341 if (cmp < 0)
1342 p = &(*p)->rb_left;
1343 else
1344 p = &(*p)->rb_right;
1345 }
1346
a0b51af3 1347 he = hist_entry__new(pair, true);
494d70a1 1348 if (he) {
30193d78
ACM
1349 memset(&he->stat, 0, sizeof(he->stat));
1350 he->hists = hists;
ce74f60e
NK
1351 rb_link_node(&he->rb_node_in, parent, p);
1352 rb_insert_color(&he->rb_node_in, root);
6263835a 1353 hists__inc_stats(hists, he);
e0af43d2 1354 he->dummy = true;
494d70a1
ACM
1355 }
1356out:
1357 return he;
1358}
1359
95529be4
ACM
1360static struct hist_entry *hists__find_entry(struct hists *hists,
1361 struct hist_entry *he)
1362{
ce74f60e
NK
1363 struct rb_node *n;
1364
1365 if (sort__need_collapse)
1366 n = hists->entries_collapsed.rb_node;
1367 else
1368 n = hists->entries_in->rb_node;
95529be4
ACM
1369
1370 while (n) {
ce74f60e
NK
1371 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1372 int64_t cmp = hist_entry__collapse(iter, he);
95529be4
ACM
1373
1374 if (cmp < 0)
1375 n = n->rb_left;
1376 else if (cmp > 0)
1377 n = n->rb_right;
1378 else
1379 return iter;
1380 }
1381
1382 return NULL;
1383}
1384
1385/*
1386 * Look for pairs to link to the leader buckets (hist_entries):
1387 */
1388void hists__match(struct hists *leader, struct hists *other)
1389{
ce74f60e 1390 struct rb_root *root;
95529be4
ACM
1391 struct rb_node *nd;
1392 struct hist_entry *pos, *pair;
1393
ce74f60e
NK
1394 if (sort__need_collapse)
1395 root = &leader->entries_collapsed;
1396 else
1397 root = leader->entries_in;
1398
1399 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1400 pos = rb_entry(nd, struct hist_entry, rb_node_in);
95529be4
ACM
1401 pair = hists__find_entry(other, pos);
1402
1403 if (pair)
5fa9041b 1404 hist_entry__add_pair(pair, pos);
95529be4
ACM
1405 }
1406}
494d70a1
ACM
1407
1408/*
1409 * Look for entries in the other hists that are not present in the leader, if
1410 * we find them, just add a dummy entry on the leader hists, with period=0,
1411 * nr_events=0, to serve as the list header.
1412 */
1413int hists__link(struct hists *leader, struct hists *other)
1414{
ce74f60e 1415 struct rb_root *root;
494d70a1
ACM
1416 struct rb_node *nd;
1417 struct hist_entry *pos, *pair;
1418
ce74f60e
NK
1419 if (sort__need_collapse)
1420 root = &other->entries_collapsed;
1421 else
1422 root = other->entries_in;
1423
1424 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1425 pos = rb_entry(nd, struct hist_entry, rb_node_in);
494d70a1
ACM
1426
1427 if (!hist_entry__has_pairs(pos)) {
1428 pair = hists__add_dummy_entry(leader, pos);
1429 if (pair == NULL)
1430 return -1;
5fa9041b 1431 hist_entry__add_pair(pos, pair);
494d70a1
ACM
1432 }
1433 }
1434
1435 return 0;
1436}
f2148330 1437
57849998
AK
1438void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
1439 struct perf_sample *sample, bool nonany_branch_mode)
1440{
1441 struct branch_info *bi;
1442
1443 /* If we have branch cycles always annotate them. */
1444 if (bs && bs->nr && bs->entries[0].flags.cycles) {
1445 int i;
1446
1447 bi = sample__resolve_bstack(sample, al);
1448 if (bi) {
1449 struct addr_map_symbol *prev = NULL;
1450
1451 /*
1452 * Ignore errors, still want to process the
1453 * other entries.
1454 *
1455 * For non standard branch modes always
1456 * force no IPC (prev == NULL)
1457 *
1458 * Note that perf stores branches reversed from
1459 * program order!
1460 */
1461 for (i = bs->nr - 1; i >= 0; i--) {
1462 addr_map_symbol__account_cycles(&bi[i].from,
1463 nonany_branch_mode ? NULL : prev,
1464 bi[i].flags.cycles);
1465 prev = &bi[i].to;
1466 }
1467 free(bi);
1468 }
1469 }
1470}
2a1731fb
ACM
1471
1472size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
1473{
1474 struct perf_evsel *pos;
1475 size_t ret = 0;
1476
1477 evlist__for_each(evlist, pos) {
1478 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1479 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
1480 }
1481
1482 return ret;
1483}
1484
1485
f2148330
NK
1486u64 hists__total_period(struct hists *hists)
1487{
1488 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1489 hists->stats.total_period;
1490}
33db4568
NK
1491
1492int parse_filter_percentage(const struct option *opt __maybe_unused,
1493 const char *arg, int unset __maybe_unused)
1494{
1495 if (!strcmp(arg, "relative"))
1496 symbol_conf.filter_relative = true;
1497 else if (!strcmp(arg, "absolute"))
1498 symbol_conf.filter_relative = false;
1499 else
1500 return -1;
1501
1502 return 0;
1503}
0b93da17
NK
1504
/* Config-file hook: currently only handles "hist.percentage". */
int perf_hist_config(const char *var, const char *value)
{
	if (strcmp(var, "hist.percentage") == 0)
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
a635fc51
ACM
1512
1513static int hists_evsel__init(struct perf_evsel *evsel)
1514{
1515 struct hists *hists = evsel__hists(evsel);
1516
1517 memset(hists, 0, sizeof(*hists));
1518 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1519 hists->entries_in = &hists->entries_in_array[0];
1520 hists->entries_collapsed = RB_ROOT;
1521 hists->entries = RB_ROOT;
1522 pthread_mutex_init(&hists->lock, NULL);
1523 return 0;
1524}
1525
1526/*
1527 * XXX We probably need a hists_evsel__exit() to free the hist_entries
1528 * stored in the rbtree...
1529 */
1530
1531int hists__init(void)
1532{
1533 int err = perf_evsel__object_config(sizeof(struct hists_evsel),
1534 hists_evsel__init, NULL);
1535 if (err)
1536 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
1537
1538 return err;
1539}
This page took 0.346763 seconds and 5 git commands to generate.