Commit | Line | Data |
---|---|---|
78f7defe | 1 | #include "annotate.h" |
8a0ecfb8 | 2 | #include "util.h" |
598357eb | 3 | #include "build-id.h" |
3d1d07ec | 4 | #include "hist.h" |
4e4f06e4 ACM |
5 | #include "session.h" |
6 | #include "sort.h" | |
9b33827d | 7 | #include <math.h> |
3d1d07ec | 8 | |
90cf1fb5 ACM |
9 | static bool hists__filter_entry_by_dso(struct hists *hists, |
10 | struct hist_entry *he); | |
11 | static bool hists__filter_entry_by_thread(struct hists *hists, | |
12 | struct hist_entry *he); | |
e94d53eb NK |
13 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
14 | struct hist_entry *he); | |
90cf1fb5 | 15 | |
7a007ca9 ACM |
16 | enum hist_filter { |
17 | HIST_FILTER__DSO, | |
18 | HIST_FILTER__THREAD, | |
19 | HIST_FILTER__PARENT, | |
e94d53eb | 20 | HIST_FILTER__SYMBOL, |
7a007ca9 ACM |
21 | }; |
22 | ||
3d1d07ec JK |
23 | struct callchain_param callchain_param = { |
24 | .mode = CHAIN_GRAPH_REL, | |
d797fdc5 SL |
25 | .min_percent = 0.5, |
26 | .order = ORDER_CALLEE | |
3d1d07ec JK |
27 | }; |
28 | ||
42b28ac0 | 29 | u16 hists__col_len(struct hists *hists, enum hist_column col) |
8a6c5b26 | 30 | { |
42b28ac0 | 31 | return hists->col_len[col]; |
8a6c5b26 ACM |
32 | } |
33 | ||
42b28ac0 | 34 | void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 35 | { |
42b28ac0 | 36 | hists->col_len[col] = len; |
8a6c5b26 ACM |
37 | } |
38 | ||
42b28ac0 | 39 | bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 40 | { |
42b28ac0 ACM |
41 | if (len > hists__col_len(hists, col)) { |
42 | hists__set_col_len(hists, col, len); | |
8a6c5b26 ACM |
43 | return true; |
44 | } | |
45 | return false; | |
46 | } | |
47 | ||
7ccf4f90 | 48 | void hists__reset_col_len(struct hists *hists) |
8a6c5b26 ACM |
49 | { |
50 | enum hist_column col; | |
51 | ||
52 | for (col = 0; col < HISTC_NR_COLS; ++col) | |
42b28ac0 | 53 | hists__set_col_len(hists, col, 0); |
8a6c5b26 ACM |
54 | } |
55 | ||
b5387528 RAV |
56 | static void hists__set_unres_dso_col_len(struct hists *hists, int dso) |
57 | { | |
58 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | |
59 | ||
60 | if (hists__col_len(hists, dso) < unresolved_col_width && | |
61 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | |
62 | !symbol_conf.dso_list) | |
63 | hists__set_col_len(hists, dso, unresolved_col_width); | |
64 | } | |
65 | ||
7ccf4f90 | 66 | void hists__calc_col_len(struct hists *hists, struct hist_entry *h) |
8a6c5b26 | 67 | { |
b5387528 | 68 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; |
8a6c5b26 ACM |
69 | u16 len; |
70 | ||
71 | if (h->ms.sym) | |
b5387528 RAV |
72 | hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); |
73 | else | |
74 | hists__set_unres_dso_col_len(hists, HISTC_DSO); | |
8a6c5b26 ACM |
75 | |
76 | len = thread__comm_len(h->thread); | |
42b28ac0 ACM |
77 | if (hists__new_col_len(hists, HISTC_COMM, len)) |
78 | hists__set_col_len(hists, HISTC_THREAD, len + 6); | |
8a6c5b26 ACM |
79 | |
80 | if (h->ms.map) { | |
81 | len = dso__name_len(h->ms.map->dso); | |
42b28ac0 | 82 | hists__new_col_len(hists, HISTC_DSO, len); |
8a6c5b26 | 83 | } |
b5387528 RAV |
84 | |
85 | if (h->branch_info) { | |
86 | int symlen; | |
87 | /* | |
88 | * +4 accounts for '[x] ' priv level info | |
89 | * +2 account of 0x prefix on raw addresses | |
90 | */ | |
91 | if (h->branch_info->from.sym) { | |
92 | symlen = (int)h->branch_info->from.sym->namelen + 4; | |
93 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
94 | ||
95 | symlen = dso__name_len(h->branch_info->from.map->dso); | |
96 | hists__new_col_len(hists, HISTC_DSO_FROM, symlen); | |
97 | } else { | |
98 | symlen = unresolved_col_width + 4 + 2; | |
99 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
100 | hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); | |
101 | } | |
102 | ||
103 | if (h->branch_info->to.sym) { | |
104 | symlen = (int)h->branch_info->to.sym->namelen + 4; | |
105 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
106 | ||
107 | symlen = dso__name_len(h->branch_info->to.map->dso); | |
108 | hists__new_col_len(hists, HISTC_DSO_TO, symlen); | |
109 | } else { | |
110 | symlen = unresolved_col_width + 4 + 2; | |
111 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
112 | hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); | |
113 | } | |
114 | } | |
8a6c5b26 ACM |
115 | } |
116 | ||
7ccf4f90 NK |
117 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) |
118 | { | |
119 | struct rb_node *next = rb_first(&hists->entries); | |
120 | struct hist_entry *n; | |
121 | int row = 0; | |
122 | ||
123 | hists__reset_col_len(hists); | |
124 | ||
125 | while (next && row++ < max_rows) { | |
126 | n = rb_entry(next, struct hist_entry, rb_node); | |
127 | if (!n->filtered) | |
128 | hists__calc_col_len(hists, n); | |
129 | next = rb_next(&n->rb_node); | |
130 | } | |
131 | } | |
132 | ||
12c14278 | 133 | static void hist_entry__add_cpumode_period(struct hist_entry *he, |
c82ee828 | 134 | unsigned int cpumode, u64 period) |
a1645ce1 | 135 | { |
28e2a106 | 136 | switch (cpumode) { |
a1645ce1 | 137 | case PERF_RECORD_MISC_KERNEL: |
b24c28f7 | 138 | he->stat.period_sys += period; |
a1645ce1 ZY |
139 | break; |
140 | case PERF_RECORD_MISC_USER: | |
b24c28f7 | 141 | he->stat.period_us += period; |
a1645ce1 ZY |
142 | break; |
143 | case PERF_RECORD_MISC_GUEST_KERNEL: | |
b24c28f7 | 144 | he->stat.period_guest_sys += period; |
a1645ce1 ZY |
145 | break; |
146 | case PERF_RECORD_MISC_GUEST_USER: | |
b24c28f7 | 147 | he->stat.period_guest_us += period; |
a1645ce1 ZY |
148 | break; |
149 | default: | |
150 | break; | |
151 | } | |
152 | } | |
153 | ||
139c0815 NK |
154 | static void he_stat__add_period(struct he_stat *he_stat, u64 period) |
155 | { | |
156 | he_stat->period += period; | |
157 | he_stat->nr_events += 1; | |
158 | } | |
159 | ||
160 | static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) | |
161 | { | |
162 | dest->period += src->period; | |
163 | dest->period_sys += src->period_sys; | |
164 | dest->period_us += src->period_us; | |
165 | dest->period_guest_sys += src->period_guest_sys; | |
166 | dest->period_guest_us += src->period_guest_us; | |
167 | dest->nr_events += src->nr_events; | |
168 | } | |
169 | ||
ab81f3fd ACM |
170 | static void hist_entry__decay(struct hist_entry *he) |
171 | { | |
b24c28f7 NK |
172 | he->stat.period = (he->stat.period * 7) / 8; |
173 | he->stat.nr_events = (he->stat.nr_events * 7) / 8; | |
ab81f3fd ACM |
174 | } |
175 | ||
176 | static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) | |
177 | { | |
b24c28f7 | 178 | u64 prev_period = he->stat.period; |
c64550cf ACM |
179 | |
180 | if (prev_period == 0) | |
df71d95f | 181 | return true; |
c64550cf | 182 | |
ab81f3fd | 183 | hist_entry__decay(he); |
c64550cf ACM |
184 | |
185 | if (!he->filtered) | |
b24c28f7 | 186 | hists->stats.total_period -= prev_period - he->stat.period; |
c64550cf | 187 | |
b24c28f7 | 188 | return he->stat.period == 0; |
ab81f3fd ACM |
189 | } |
190 | ||
b079d4e9 ACM |
191 | static void __hists__decay_entries(struct hists *hists, bool zap_user, |
192 | bool zap_kernel, bool threaded) | |
ab81f3fd ACM |
193 | { |
194 | struct rb_node *next = rb_first(&hists->entries); | |
195 | struct hist_entry *n; | |
196 | ||
197 | while (next) { | |
198 | n = rb_entry(next, struct hist_entry, rb_node); | |
199 | next = rb_next(&n->rb_node); | |
df71d95f ACM |
200 | /* |
201 | * We may be annotating this, for instance, so keep it here in | |
202 | * case some it gets new samples, we'll eventually free it when | |
203 | * the user stops browsing and it agains gets fully decayed. | |
204 | */ | |
b079d4e9 ACM |
205 | if (((zap_user && n->level == '.') || |
206 | (zap_kernel && n->level != '.') || | |
207 | hists__decay_entry(hists, n)) && | |
208 | !n->used) { | |
ab81f3fd ACM |
209 | rb_erase(&n->rb_node, &hists->entries); |
210 | ||
e345fa18 | 211 | if (sort__need_collapse || threaded) |
ab81f3fd ACM |
212 | rb_erase(&n->rb_node_in, &hists->entries_collapsed); |
213 | ||
214 | hist_entry__free(n); | |
215 | --hists->nr_entries; | |
216 | } | |
217 | } | |
218 | } | |
219 | ||
b079d4e9 | 220 | void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) |
e345fa18 | 221 | { |
b079d4e9 | 222 | return __hists__decay_entries(hists, zap_user, zap_kernel, false); |
e345fa18 ACM |
223 | } |
224 | ||
b079d4e9 ACM |
225 | void hists__decay_entries_threaded(struct hists *hists, |
226 | bool zap_user, bool zap_kernel) | |
e345fa18 | 227 | { |
b079d4e9 | 228 | return __hists__decay_entries(hists, zap_user, zap_kernel, true); |
e345fa18 ACM |
229 | } |
230 | ||
3d1d07ec | 231 | /* |
c82ee828 | 232 | * histogram, sorted on item, collects periods |
3d1d07ec JK |
233 | */ |
234 | ||
28e2a106 ACM |
235 | static struct hist_entry *hist_entry__new(struct hist_entry *template) |
236 | { | |
d2009c51 | 237 | size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; |
12c14278 | 238 | struct hist_entry *he = malloc(sizeof(*he) + callchain_size); |
28e2a106 | 239 | |
12c14278 ACM |
240 | if (he != NULL) { |
241 | *he = *template; | |
c4b35351 | 242 | |
12c14278 ACM |
243 | if (he->ms.map) |
244 | he->ms.map->referenced = true; | |
28e2a106 | 245 | if (symbol_conf.use_callchain) |
12c14278 | 246 | callchain_init(he->callchain); |
b821c732 ACM |
247 | |
248 | INIT_LIST_HEAD(&he->pairs.node); | |
28e2a106 ACM |
249 | } |
250 | ||
12c14278 | 251 | return he; |
28e2a106 ACM |
252 | } |
253 | ||
66f97ed3 | 254 | void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) |
fefb0b94 | 255 | { |
8a6c5b26 | 256 | if (!h->filtered) { |
42b28ac0 ACM |
257 | hists__calc_col_len(hists, h); |
258 | ++hists->nr_entries; | |
b24c28f7 | 259 | hists->stats.total_period += h->stat.period; |
8a6c5b26 | 260 | } |
fefb0b94 ACM |
261 | } |
262 | ||
7a007ca9 ACM |
263 | static u8 symbol__parent_filter(const struct symbol *parent) |
264 | { | |
265 | if (symbol_conf.exclude_other && parent == NULL) | |
266 | return 1 << HIST_FILTER__PARENT; | |
267 | return 0; | |
268 | } | |
269 | ||
b5387528 RAV |
270 | static struct hist_entry *add_hist_entry(struct hists *hists, |
271 | struct hist_entry *entry, | |
1c02c4d2 | 272 | struct addr_location *al, |
b5387528 | 273 | u64 period) |
9735abf1 | 274 | { |
1980c2eb | 275 | struct rb_node **p; |
9735abf1 ACM |
276 | struct rb_node *parent = NULL; |
277 | struct hist_entry *he; | |
9735abf1 ACM |
278 | int cmp; |
279 | ||
1980c2eb ACM |
280 | pthread_mutex_lock(&hists->lock); |
281 | ||
282 | p = &hists->entries_in->rb_node; | |
283 | ||
9735abf1 ACM |
284 | while (*p != NULL) { |
285 | parent = *p; | |
1980c2eb | 286 | he = rb_entry(parent, struct hist_entry, rb_node_in); |
9735abf1 | 287 | |
9afcf930 NK |
288 | /* |
289 | * Make sure that it receives arguments in a same order as | |
290 | * hist_entry__collapse() so that we can use an appropriate | |
291 | * function when searching an entry regardless which sort | |
292 | * keys were used. | |
293 | */ | |
294 | cmp = hist_entry__cmp(he, entry); | |
9735abf1 ACM |
295 | |
296 | if (!cmp) { | |
139c0815 | 297 | he_stat__add_period(&he->stat, period); |
63fa471d DM |
298 | |
299 | /* If the map of an existing hist_entry has | |
300 | * become out-of-date due to an exec() or | |
301 | * similar, update it. Otherwise we will | |
302 | * mis-adjust symbol addresses when computing | |
303 | * the history counter to increment. | |
304 | */ | |
305 | if (he->ms.map != entry->ms.map) { | |
306 | he->ms.map = entry->ms.map; | |
307 | if (he->ms.map) | |
308 | he->ms.map->referenced = true; | |
309 | } | |
28e2a106 | 310 | goto out; |
9735abf1 ACM |
311 | } |
312 | ||
313 | if (cmp < 0) | |
314 | p = &(*p)->rb_left; | |
315 | else | |
316 | p = &(*p)->rb_right; | |
317 | } | |
318 | ||
b5387528 | 319 | he = hist_entry__new(entry); |
9735abf1 | 320 | if (!he) |
1980c2eb ACM |
321 | goto out_unlock; |
322 | ||
323 | rb_link_node(&he->rb_node_in, parent, p); | |
324 | rb_insert_color(&he->rb_node_in, hists->entries_in); | |
28e2a106 | 325 | out: |
c82ee828 | 326 | hist_entry__add_cpumode_period(he, al->cpumode, period); |
1980c2eb ACM |
327 | out_unlock: |
328 | pthread_mutex_unlock(&hists->lock); | |
9735abf1 ACM |
329 | return he; |
330 | } | |
331 | ||
b5387528 RAV |
332 | struct hist_entry *__hists__add_branch_entry(struct hists *self, |
333 | struct addr_location *al, | |
334 | struct symbol *sym_parent, | |
335 | struct branch_info *bi, | |
336 | u64 period) | |
337 | { | |
338 | struct hist_entry entry = { | |
339 | .thread = al->thread, | |
340 | .ms = { | |
341 | .map = bi->to.map, | |
342 | .sym = bi->to.sym, | |
343 | }, | |
344 | .cpu = al->cpu, | |
345 | .ip = bi->to.addr, | |
346 | .level = al->level, | |
b24c28f7 NK |
347 | .stat = { |
348 | .period = period, | |
c4b35351 | 349 | .nr_events = 1, |
b24c28f7 | 350 | }, |
b5387528 RAV |
351 | .parent = sym_parent, |
352 | .filtered = symbol__parent_filter(sym_parent), | |
353 | .branch_info = bi, | |
ae359f19 | 354 | .hists = self, |
b5387528 RAV |
355 | }; |
356 | ||
357 | return add_hist_entry(self, &entry, al, period); | |
358 | } | |
359 | ||
360 | struct hist_entry *__hists__add_entry(struct hists *self, | |
361 | struct addr_location *al, | |
362 | struct symbol *sym_parent, u64 period) | |
363 | { | |
364 | struct hist_entry entry = { | |
365 | .thread = al->thread, | |
366 | .ms = { | |
367 | .map = al->map, | |
368 | .sym = al->sym, | |
369 | }, | |
370 | .cpu = al->cpu, | |
371 | .ip = al->addr, | |
372 | .level = al->level, | |
b24c28f7 NK |
373 | .stat = { |
374 | .period = period, | |
c4b35351 | 375 | .nr_events = 1, |
b24c28f7 | 376 | }, |
b5387528 RAV |
377 | .parent = sym_parent, |
378 | .filtered = symbol__parent_filter(sym_parent), | |
ae359f19 | 379 | .hists = self, |
b5387528 RAV |
380 | }; |
381 | ||
382 | return add_hist_entry(self, &entry, al, period); | |
383 | } | |
384 | ||
3d1d07ec JK |
385 | int64_t |
386 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | |
387 | { | |
388 | struct sort_entry *se; | |
389 | int64_t cmp = 0; | |
390 | ||
391 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
fcd14984 | 392 | cmp = se->se_cmp(left, right); |
3d1d07ec JK |
393 | if (cmp) |
394 | break; | |
395 | } | |
396 | ||
397 | return cmp; | |
398 | } | |
399 | ||
400 | int64_t | |
401 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | |
402 | { | |
403 | struct sort_entry *se; | |
404 | int64_t cmp = 0; | |
405 | ||
406 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
407 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | |
408 | ||
fcd14984 | 409 | f = se->se_collapse ?: se->se_cmp; |
3d1d07ec JK |
410 | |
411 | cmp = f(left, right); | |
412 | if (cmp) | |
413 | break; | |
414 | } | |
415 | ||
416 | return cmp; | |
417 | } | |
418 | ||
419 | void hist_entry__free(struct hist_entry *he) | |
420 | { | |
580e338d | 421 | free(he->branch_info); |
3d1d07ec JK |
422 | free(he); |
423 | } | |
424 | ||
425 | /* | |
426 | * collapse the histogram | |
427 | */ | |
428 | ||
1d037ca1 | 429 | static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, |
1b3a0e95 FW |
430 | struct rb_root *root, |
431 | struct hist_entry *he) | |
3d1d07ec | 432 | { |
b9bf0892 | 433 | struct rb_node **p = &root->rb_node; |
3d1d07ec JK |
434 | struct rb_node *parent = NULL; |
435 | struct hist_entry *iter; | |
436 | int64_t cmp; | |
437 | ||
438 | while (*p != NULL) { | |
439 | parent = *p; | |
1980c2eb | 440 | iter = rb_entry(parent, struct hist_entry, rb_node_in); |
3d1d07ec JK |
441 | |
442 | cmp = hist_entry__collapse(iter, he); | |
443 | ||
444 | if (!cmp) { | |
139c0815 | 445 | he_stat__add_stat(&iter->stat, &he->stat); |
9ec60972 | 446 | |
1b3a0e95 | 447 | if (symbol_conf.use_callchain) { |
47260645 NK |
448 | callchain_cursor_reset(&callchain_cursor); |
449 | callchain_merge(&callchain_cursor, | |
450 | iter->callchain, | |
1b3a0e95 FW |
451 | he->callchain); |
452 | } | |
3d1d07ec | 453 | hist_entry__free(he); |
fefb0b94 | 454 | return false; |
3d1d07ec JK |
455 | } |
456 | ||
457 | if (cmp < 0) | |
458 | p = &(*p)->rb_left; | |
459 | else | |
460 | p = &(*p)->rb_right; | |
461 | } | |
462 | ||
1980c2eb ACM |
463 | rb_link_node(&he->rb_node_in, parent, p); |
464 | rb_insert_color(&he->rb_node_in, root); | |
fefb0b94 | 465 | return true; |
3d1d07ec JK |
466 | } |
467 | ||
1980c2eb | 468 | static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) |
3d1d07ec | 469 | { |
1980c2eb ACM |
470 | struct rb_root *root; |
471 | ||
472 | pthread_mutex_lock(&hists->lock); | |
473 | ||
474 | root = hists->entries_in; | |
475 | if (++hists->entries_in > &hists->entries_in_array[1]) | |
476 | hists->entries_in = &hists->entries_in_array[0]; | |
477 | ||
478 | pthread_mutex_unlock(&hists->lock); | |
479 | ||
480 | return root; | |
481 | } | |
482 | ||
/* Re-run every active filter (dso/thread/symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
489 | ||
1980c2eb ACM |
490 | static void __hists__collapse_resort(struct hists *hists, bool threaded) |
491 | { | |
492 | struct rb_root *root; | |
3d1d07ec JK |
493 | struct rb_node *next; |
494 | struct hist_entry *n; | |
495 | ||
1980c2eb | 496 | if (!sort__need_collapse && !threaded) |
3d1d07ec JK |
497 | return; |
498 | ||
1980c2eb ACM |
499 | root = hists__get_rotate_entries_in(hists); |
500 | next = rb_first(root); | |
b9bf0892 | 501 | |
3d1d07ec | 502 | while (next) { |
1980c2eb ACM |
503 | n = rb_entry(next, struct hist_entry, rb_node_in); |
504 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 505 | |
1980c2eb | 506 | rb_erase(&n->rb_node_in, root); |
90cf1fb5 ACM |
507 | if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { |
508 | /* | |
509 | * If it wasn't combined with one of the entries already | |
510 | * collapsed, we need to apply the filters that may have | |
511 | * been set by, say, the hist_browser. | |
512 | */ | |
513 | hists__apply_filters(hists, n); | |
90cf1fb5 | 514 | } |
3d1d07ec | 515 | } |
1980c2eb | 516 | } |
b9bf0892 | 517 | |
1980c2eb ACM |
518 | void hists__collapse_resort(struct hists *hists) |
519 | { | |
520 | return __hists__collapse_resort(hists, false); | |
521 | } | |
522 | ||
523 | void hists__collapse_resort_threaded(struct hists *hists) | |
524 | { | |
525 | return __hists__collapse_resort(hists, true); | |
3d1d07ec JK |
526 | } |
527 | ||
528 | /* | |
c82ee828 | 529 | * reverse the map, sort on period. |
3d1d07ec JK |
530 | */ |
531 | ||
1c02c4d2 ACM |
532 | static void __hists__insert_output_entry(struct rb_root *entries, |
533 | struct hist_entry *he, | |
534 | u64 min_callchain_hits) | |
3d1d07ec | 535 | { |
1c02c4d2 | 536 | struct rb_node **p = &entries->rb_node; |
3d1d07ec JK |
537 | struct rb_node *parent = NULL; |
538 | struct hist_entry *iter; | |
539 | ||
d599db3f | 540 | if (symbol_conf.use_callchain) |
b9fb9304 | 541 | callchain_param.sort(&he->sorted_chain, he->callchain, |
3d1d07ec JK |
542 | min_callchain_hits, &callchain_param); |
543 | ||
544 | while (*p != NULL) { | |
545 | parent = *p; | |
546 | iter = rb_entry(parent, struct hist_entry, rb_node); | |
547 | ||
b24c28f7 | 548 | if (he->stat.period > iter->stat.period) |
3d1d07ec JK |
549 | p = &(*p)->rb_left; |
550 | else | |
551 | p = &(*p)->rb_right; | |
552 | } | |
553 | ||
554 | rb_link_node(&he->rb_node, parent, p); | |
1c02c4d2 | 555 | rb_insert_color(&he->rb_node, entries); |
3d1d07ec JK |
556 | } |
557 | ||
1980c2eb | 558 | static void __hists__output_resort(struct hists *hists, bool threaded) |
3d1d07ec | 559 | { |
1980c2eb | 560 | struct rb_root *root; |
3d1d07ec JK |
561 | struct rb_node *next; |
562 | struct hist_entry *n; | |
3d1d07ec JK |
563 | u64 min_callchain_hits; |
564 | ||
42b28ac0 | 565 | min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); |
3d1d07ec | 566 | |
1980c2eb ACM |
567 | if (sort__need_collapse || threaded) |
568 | root = &hists->entries_collapsed; | |
569 | else | |
570 | root = hists->entries_in; | |
571 | ||
572 | next = rb_first(root); | |
573 | hists->entries = RB_ROOT; | |
3d1d07ec | 574 | |
42b28ac0 | 575 | hists->nr_entries = 0; |
7928631a | 576 | hists->stats.total_period = 0; |
42b28ac0 | 577 | hists__reset_col_len(hists); |
fefb0b94 | 578 | |
3d1d07ec | 579 | while (next) { |
1980c2eb ACM |
580 | n = rb_entry(next, struct hist_entry, rb_node_in); |
581 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 582 | |
1980c2eb | 583 | __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); |
42b28ac0 | 584 | hists__inc_nr_entries(hists, n); |
3d1d07ec | 585 | } |
1980c2eb | 586 | } |
b9bf0892 | 587 | |
1980c2eb ACM |
588 | void hists__output_resort(struct hists *hists) |
589 | { | |
590 | return __hists__output_resort(hists, false); | |
591 | } | |
592 | ||
593 | void hists__output_resort_threaded(struct hists *hists) | |
594 | { | |
595 | return __hists__output_resort(hists, true); | |
3d1d07ec | 596 | } |
4ecf84d0 | 597 | |
42b28ac0 | 598 | static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, |
cc5edb0e ACM |
599 | enum hist_filter filter) |
600 | { | |
601 | h->filtered &= ~(1 << filter); | |
602 | if (h->filtered) | |
603 | return; | |
604 | ||
42b28ac0 | 605 | ++hists->nr_entries; |
0f0cbf7a | 606 | if (h->ms.unfolded) |
42b28ac0 | 607 | hists->nr_entries += h->nr_rows; |
0f0cbf7a | 608 | h->row_offset = 0; |
b24c28f7 NK |
609 | hists->stats.total_period += h->stat.period; |
610 | hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events; | |
cc5edb0e | 611 | |
42b28ac0 | 612 | hists__calc_col_len(hists, h); |
cc5edb0e ACM |
613 | } |
614 | ||
90cf1fb5 ACM |
615 | |
616 | static bool hists__filter_entry_by_dso(struct hists *hists, | |
617 | struct hist_entry *he) | |
618 | { | |
619 | if (hists->dso_filter != NULL && | |
620 | (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { | |
621 | he->filtered |= (1 << HIST_FILTER__DSO); | |
622 | return true; | |
623 | } | |
624 | ||
625 | return false; | |
626 | } | |
627 | ||
d7b76f09 | 628 | void hists__filter_by_dso(struct hists *hists) |
b09e0190 ACM |
629 | { |
630 | struct rb_node *nd; | |
631 | ||
42b28ac0 ACM |
632 | hists->nr_entries = hists->stats.total_period = 0; |
633 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
634 | hists__reset_col_len(hists); | |
b09e0190 | 635 | |
42b28ac0 | 636 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
637 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
638 | ||
639 | if (symbol_conf.exclude_other && !h->parent) | |
640 | continue; | |
641 | ||
90cf1fb5 | 642 | if (hists__filter_entry_by_dso(hists, h)) |
b09e0190 | 643 | continue; |
b09e0190 | 644 | |
42b28ac0 | 645 | hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); |
b09e0190 ACM |
646 | } |
647 | } | |
648 | ||
90cf1fb5 ACM |
649 | static bool hists__filter_entry_by_thread(struct hists *hists, |
650 | struct hist_entry *he) | |
651 | { | |
652 | if (hists->thread_filter != NULL && | |
653 | he->thread != hists->thread_filter) { | |
654 | he->filtered |= (1 << HIST_FILTER__THREAD); | |
655 | return true; | |
656 | } | |
657 | ||
658 | return false; | |
659 | } | |
660 | ||
d7b76f09 | 661 | void hists__filter_by_thread(struct hists *hists) |
b09e0190 ACM |
662 | { |
663 | struct rb_node *nd; | |
664 | ||
42b28ac0 ACM |
665 | hists->nr_entries = hists->stats.total_period = 0; |
666 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
667 | hists__reset_col_len(hists); | |
b09e0190 | 668 | |
42b28ac0 | 669 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
670 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
671 | ||
90cf1fb5 | 672 | if (hists__filter_entry_by_thread(hists, h)) |
b09e0190 | 673 | continue; |
cc5edb0e | 674 | |
42b28ac0 | 675 | hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); |
b09e0190 ACM |
676 | } |
677 | } | |
ef7b93a1 | 678 | |
e94d53eb NK |
679 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
680 | struct hist_entry *he) | |
681 | { | |
682 | if (hists->symbol_filter_str != NULL && | |
683 | (!he->ms.sym || strstr(he->ms.sym->name, | |
684 | hists->symbol_filter_str) == NULL)) { | |
685 | he->filtered |= (1 << HIST_FILTER__SYMBOL); | |
686 | return true; | |
687 | } | |
688 | ||
689 | return false; | |
690 | } | |
691 | ||
692 | void hists__filter_by_symbol(struct hists *hists) | |
693 | { | |
694 | struct rb_node *nd; | |
695 | ||
696 | hists->nr_entries = hists->stats.total_period = 0; | |
697 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
698 | hists__reset_col_len(hists); | |
699 | ||
700 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | |
701 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | |
702 | ||
703 | if (hists__filter_entry_by_symbol(hists, h)) | |
704 | continue; | |
705 | ||
706 | hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); | |
707 | } | |
708 | } | |
709 | ||
2f525d01 | 710 | int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) |
ef7b93a1 | 711 | { |
2f525d01 | 712 | return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); |
ef7b93a1 ACM |
713 | } |
714 | ||
ce6f4fab | 715 | int hist_entry__annotate(struct hist_entry *he, size_t privsize) |
ef7b93a1 | 716 | { |
ce6f4fab | 717 | return symbol__annotate(he->ms.sym, he->ms.map, privsize); |
ef7b93a1 | 718 | } |
c8446b9b | 719 | |
28a6b6aa ACM |
720 | void events_stats__inc(struct events_stats *stats, u32 type) |
721 | { | |
722 | ++stats->nr_events[0]; | |
723 | ++stats->nr_events[type]; | |
724 | } | |
725 | ||
42b28ac0 | 726 | void hists__inc_nr_events(struct hists *hists, u32 type) |
c8446b9b | 727 | { |
28a6b6aa | 728 | events_stats__inc(&hists->stats, type); |
c8446b9b | 729 | } |
95529be4 | 730 | |
494d70a1 ACM |
731 | static struct hist_entry *hists__add_dummy_entry(struct hists *hists, |
732 | struct hist_entry *pair) | |
733 | { | |
ce74f60e NK |
734 | struct rb_root *root; |
735 | struct rb_node **p; | |
494d70a1 ACM |
736 | struct rb_node *parent = NULL; |
737 | struct hist_entry *he; | |
738 | int cmp; | |
739 | ||
ce74f60e NK |
740 | if (sort__need_collapse) |
741 | root = &hists->entries_collapsed; | |
742 | else | |
743 | root = hists->entries_in; | |
744 | ||
745 | p = &root->rb_node; | |
746 | ||
494d70a1 ACM |
747 | while (*p != NULL) { |
748 | parent = *p; | |
ce74f60e | 749 | he = rb_entry(parent, struct hist_entry, rb_node_in); |
494d70a1 | 750 | |
ce74f60e | 751 | cmp = hist_entry__collapse(he, pair); |
494d70a1 ACM |
752 | |
753 | if (!cmp) | |
754 | goto out; | |
755 | ||
756 | if (cmp < 0) | |
757 | p = &(*p)->rb_left; | |
758 | else | |
759 | p = &(*p)->rb_right; | |
760 | } | |
761 | ||
762 | he = hist_entry__new(pair); | |
763 | if (he) { | |
30193d78 ACM |
764 | memset(&he->stat, 0, sizeof(he->stat)); |
765 | he->hists = hists; | |
ce74f60e NK |
766 | rb_link_node(&he->rb_node_in, parent, p); |
767 | rb_insert_color(&he->rb_node_in, root); | |
494d70a1 ACM |
768 | hists__inc_nr_entries(hists, he); |
769 | } | |
770 | out: | |
771 | return he; | |
772 | } | |
773 | ||
95529be4 ACM |
774 | static struct hist_entry *hists__find_entry(struct hists *hists, |
775 | struct hist_entry *he) | |
776 | { | |
ce74f60e NK |
777 | struct rb_node *n; |
778 | ||
779 | if (sort__need_collapse) | |
780 | n = hists->entries_collapsed.rb_node; | |
781 | else | |
782 | n = hists->entries_in->rb_node; | |
95529be4 ACM |
783 | |
784 | while (n) { | |
ce74f60e NK |
785 | struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); |
786 | int64_t cmp = hist_entry__collapse(iter, he); | |
95529be4 ACM |
787 | |
788 | if (cmp < 0) | |
789 | n = n->rb_left; | |
790 | else if (cmp > 0) | |
791 | n = n->rb_right; | |
792 | else | |
793 | return iter; | |
794 | } | |
795 | ||
796 | return NULL; | |
797 | } | |
798 | ||
799 | /* | |
800 | * Look for pairs to link to the leader buckets (hist_entries): | |
801 | */ | |
802 | void hists__match(struct hists *leader, struct hists *other) | |
803 | { | |
ce74f60e | 804 | struct rb_root *root; |
95529be4 ACM |
805 | struct rb_node *nd; |
806 | struct hist_entry *pos, *pair; | |
807 | ||
ce74f60e NK |
808 | if (sort__need_collapse) |
809 | root = &leader->entries_collapsed; | |
810 | else | |
811 | root = leader->entries_in; | |
812 | ||
813 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
814 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
95529be4 ACM |
815 | pair = hists__find_entry(other, pos); |
816 | ||
817 | if (pair) | |
5fa9041b | 818 | hist_entry__add_pair(pair, pos); |
95529be4 ACM |
819 | } |
820 | } | |
494d70a1 ACM |
821 | |
822 | /* | |
823 | * Look for entries in the other hists that are not present in the leader, if | |
824 | * we find them, just add a dummy entry on the leader hists, with period=0, | |
825 | * nr_events=0, to serve as the list header. | |
826 | */ | |
827 | int hists__link(struct hists *leader, struct hists *other) | |
828 | { | |
ce74f60e | 829 | struct rb_root *root; |
494d70a1 ACM |
830 | struct rb_node *nd; |
831 | struct hist_entry *pos, *pair; | |
832 | ||
ce74f60e NK |
833 | if (sort__need_collapse) |
834 | root = &other->entries_collapsed; | |
835 | else | |
836 | root = other->entries_in; | |
837 | ||
838 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
839 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
494d70a1 ACM |
840 | |
841 | if (!hist_entry__has_pairs(pos)) { | |
842 | pair = hists__add_dummy_entry(leader, pos); | |
843 | if (pair == NULL) | |
844 | return -1; | |
5fa9041b | 845 | hist_entry__add_pair(pos, pair); |
494d70a1 ACM |
846 | } |
847 | } | |
848 | ||
849 | return 0; | |
850 | } |