perf hist: Adopt filter by dso and by thread methods from the newt browser
[deliverable/linux.git] / tools / perf / util / hist.c
1 #include "hist.h"
2 #include "session.h"
3 #include "sort.h"
4 #include <math.h>
5
/*
 * Global callchain rendering parameters: default to the relative graph
 * mode and hide chains accounting for less than 0.5% of the hits.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};
10
11 static void hist_entry__add_cpumode_count(struct hist_entry *self,
12 unsigned int cpumode, u64 count)
13 {
14 switch (cpumode) {
15 case PERF_RECORD_MISC_KERNEL:
16 self->count_sys += count;
17 break;
18 case PERF_RECORD_MISC_USER:
19 self->count_us += count;
20 break;
21 case PERF_RECORD_MISC_GUEST_KERNEL:
22 self->count_guest_sys += count;
23 break;
24 case PERF_RECORD_MISC_GUEST_USER:
25 self->count_guest_us += count;
26 break;
27 default:
28 break;
29 }
30 }
31
32 /*
33 * histogram, sorted on item, collects counts
34 */
35
36 static struct hist_entry *hist_entry__new(struct hist_entry *template)
37 {
38 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
39 struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
40
41 if (self != NULL) {
42 *self = *template;
43 if (symbol_conf.use_callchain)
44 callchain_init(self->callchain);
45 }
46
47 return self;
48 }
49
50 static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
51 {
52 if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
53 self->max_sym_namelen = entry->ms.sym->namelen;
54 ++self->nr_entries;
55 }
56
/*
 * Add a sample for @count events at the resolved location @al to the
 * @self histogram.
 *
 * A template entry is built on the stack and looked up in the rbtree
 * using the configured sort order; if an equal entry already exists its
 * count is simply bumped, otherwise a heap copy is allocated and linked
 * in.  The per-cpumode counters are updated in both cases.
 *
 * Returns the (new or pre-existing) entry, or NULL on allocation
 * failure.
 */
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 count)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* stack template: only copied to the heap if no match is found */
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.ip	= al->addr,
		.level	= al->level,
		.count	= count,
		.parent = sym_parent,
	};
	int cmp;

	/* standard rbtree descent keyed by hist_entry__cmp() */
	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			/* existing entry: just accumulate */
			he->count += count;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_count(he, al->cpumode, count);
	return he;
}
104
105 int64_t
106 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
107 {
108 struct sort_entry *se;
109 int64_t cmp = 0;
110
111 list_for_each_entry(se, &hist_entry__sort_list, list) {
112 cmp = se->se_cmp(left, right);
113 if (cmp)
114 break;
115 }
116
117 return cmp;
118 }
119
120 int64_t
121 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
122 {
123 struct sort_entry *se;
124 int64_t cmp = 0;
125
126 list_for_each_entry(se, &hist_entry__sort_list, list) {
127 int64_t (*f)(struct hist_entry *, struct hist_entry *);
128
129 f = se->se_collapse ?: se->se_cmp;
130
131 cmp = f(left, right);
132 if (cmp)
133 break;
134 }
135
136 return cmp;
137 }
138
/* Release an entry obtained from hist_entry__new(). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
143
144 /*
145 * collapse the histogram
146 */
147
/*
 * Insert @he into the collapse tree @root, keyed by
 * hist_entry__collapse().  If an equal entry already exists, @he's
 * count is merged into it and @he is FREED — ownership passes to this
 * function.  Returns true when @he was actually linked into the tree,
 * false when it was merged (and freed).
 */
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			/* duplicate under collapse rules: merge and drop @he */
			iter->count += he->count;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}
177
/*
 * Rebuild the histogram merging entries that are equal under the
 * collapse comparators (e.g. after resolving symbols).  No-op unless
 * the active sort keys require collapsing.  nr_entries and
 * max_sym_namelen are recomputed for the surviving entries.
 */
void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		/* grab the successor before erasing n from the tree */
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		/* n may be freed here if it merges into an existing entry */
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
203
204 /*
205 * reverse the map, sort on count.
206 */
207
/*
 * Link @he into the output tree @entries sorted by descending count
 * (larger counts go left, so an in-order walk prints hottest first).
 * When callchains are enabled, @he's chain is sorted here as well,
 * pruning branches below @min_callchain_hits.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* descending order: bigger counts descend to the left */
		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
233
/*
 * Rebuild the histogram tree ordered by descending count for display.
 * The callchain pruning threshold is min_percent of the total event
 * count.  nr_entries and max_sym_namelen are recomputed on the way.
 */
void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		/* fetch the successor before erasing n */
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
260
/*
 * Print the fixed column padding followed by @left_margin extra spaces,
 * aligning callchain output under the entry's comm column.  Returns the
 * number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}
271
/*
 * Print one separator line of the callchain graph: a "|" for every
 * depth level whose bit is set in @depth_mask (i.e. levels that still
 * have siblings below), blanks otherwise.  Returns bytes written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}
288
/*
 * Print one callchain graph entry line: the depth pipes, then — only on
 * the first line of a child (@count == 0) at the innermost level — the
 * colored "--NN.NN%--" branch marker, and finally the symbol name (or
 * raw ip when unresolved).  Returns bytes written.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int count,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			/* first line of this branch: show its percentage */
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
318
/* Synthetic "[...]" symbol used to render remaining (pruned) hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate the fake "[...]" symbol and hook it into rem_hits.  On
 * allocation failure a warning is printed and rem_sq_bracket stays
 * NULL, which callers must tolerate.
 */
static void init_rem_hits(void)
{
	/* +6 holds "[...]" plus the NUL; assumes symbol->name is a
	 * trailing variable-length array — TODO confirm against symbol.h */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
333
/*
 * Recursively print the callchain graph rooted at @self.
 *
 * @depth_mask has one bit per depth level; a set bit means that level
 * still has unprinted siblings, so its "|" connector must keep being
 * drawn by deeper lines.  In CHAIN_GRAPH_REL mode percentages are
 * relative to the parent's children_hit, and hits pruned below the
 * threshold are summarized as a trailing "[...]" pseudo entry.
 * Printing stops after callchain_param.print_limit children per level.
 * Returns bytes written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		/* recurse into this child's own children one level deeper */
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {

		/* init_rem_hits() may have failed to allocate the symbol */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
412
/*
 * Print the top of a callchain graph: the entries of the root node
 * (prefixed with a "---" connector on the first printed line), then
 * recurse into __callchain__fprintf_graph() for the children.  When
 * sorting by symbol the first entry duplicates the hist line and is
 * skipped.  Returns bytes written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		/* first entry repeats the symbol column; don't print twice */
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			/* subsequent lines align under the "---" connector */
			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
450
/*
 * Print a callchain in flat mode: recurse to the root first so frames
 * come out outermost-first, then print this node's own entries.
 * PERF_CONTEXT_* marker ips are skipped.  Returns bytes written.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	/* print ancestors before this node's own frames */
	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}
475
476 static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
477 u64 total_samples, int left_margin)
478 {
479 struct rb_node *rb_node;
480 struct callchain_node *chain;
481 size_t ret = 0;
482 u32 entries_printed = 0;
483
484 rb_node = rb_first(&self->sorted_chain);
485 while (rb_node) {
486 double percent;
487
488 chain = rb_entry(rb_node, struct callchain_node, rb_node);
489 percent = chain->hit * 100.0 / total_samples;
490 switch (callchain_param.mode) {
491 case CHAIN_FLAT:
492 ret += percent_color_fprintf(fp, " %6.2f%%\n",
493 percent);
494 ret += callchain__fprintf_flat(fp, chain, total_samples);
495 break;
496 case CHAIN_GRAPH_ABS: /* Falldown */
497 case CHAIN_GRAPH_REL:
498 ret += callchain__fprintf_graph(fp, chain, total_samples,
499 left_margin);
500 case CHAIN_NONE:
501 default:
502 break;
503 }
504 ret += fprintf(fp, "\n");
505 if (++entries_printed == callchain_param.print_limit)
506 break;
507 rb_node = rb_next(rb_node);
508 }
509
510 return ret;
511 }
512
/*
 * Format one histogram line for @self into @s (at most @size bytes).
 *
 * When @pair_hists is given (perf diff), the percentage/count columns
 * come from the paired baseline entry and a Delta column (plus an
 * optional Displacement column) is appended.  Otherwise they come from
 * @self against @session_total.  @color selects colored percentage
 * output; symbol_conf.field_sep switches to separator-delimited output
 * for scripting.  The configured sort columns are appended last.
 *
 * Returns the number of characters written (snprintf semantics).
 *
 * NOTE(review): counts are u64 but printed with %lld (signed) — relies
 * on u64 being unsigned long long and values < 2^63; confirm whether
 * PRIu64 should be used instead.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 count, total, count_sys, count_us, count_guest_sys, count_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		/* diff mode: baseline columns come from the paired entry */
		count = self->pair ? self->pair->count : 0;
		total = pair_hists->stats.total;
		count_sys = self->pair ? self->pair->count_sys : 0;
		count_us = self->pair ? self->pair->count_us : 0;
		count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
		count_guest_us = self->pair ? self->pair->count_guest_us : 0;
	} else {
		count = self->count;
		total = session_total;
		count_sys = self->count_sys;
		count_us = self->count_us;
		count_guest_sys = self->count_guest_sys;
		count_guest_us = self->count_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (count * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (count * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			/* per-cpumode percentage breakdown */
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_us * 100.0) /
							      total);
			}
		}
	} else
		/* no total to divide by: print the raw count instead */
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", count);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, count);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", count);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (count * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->count * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* suppress deltas below the displayable resolution */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	/* append the configured sort key columns */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}
624
625 int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
626 bool show_displacement, long displacement, FILE *fp,
627 u64 session_total)
628 {
629 char bf[512];
630 hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
631 show_displacement, displacement,
632 true, session_total);
633 return fprintf(fp, "%s\n", bf);
634 }
635
636 static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
637 u64 session_total)
638 {
639 int left_margin = 0;
640
641 if (sort__first_dimension == SORT_COMM) {
642 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
643 typeof(*se), list);
644 left_margin = se->se_width ? *se->se_width : 0;
645 left_margin -= thread__comm_len(self->thread);
646 }
647
648 return hist_entry_callchain__fprintf(fp, self, session_total,
649 left_margin);
650 }
651
/*
 * Print the whole histogram @self to @fp: the column header (Overhead
 * or Baseline, optional Samples / cpu-utilization / Delta /
 * Displacement columns, then the sort key headers), a dotted underline
 * (unless field_sep output is active), and one line per entry with
 * optional callchains.  @pair enables perf-diff mode against a baseline
 * histogram.
 *
 * Returns the byte count accumulated in ret.
 * NOTE(review): several header fprintf() results are deliberately or
 * accidentally not folded into ret, so the return value undercounts
 * the actual output — confirm callers only use it loosely.
 */
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	char *col_width = symbol_conf.col_width_list_str;

	/* set up the "[...]" pseudo symbol for pruned callchain hits */
	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* sort key column headers, applying any user column widths */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (se->se_width) {
			if (symbol_conf.col_width_list_str) {
				/* consume the next width from the
				 * comma-separated user list */
				if (col_width) {
					*se->se_width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->se_width = max(*se->se_width, width);
		}
		fprintf(fp, " %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	/* separator-delimited output gets no dotted underline */
	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->se_width)
			width = *se->se_width;
		else
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			/* how far the entry moved versus the baseline order */
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, pair, show_displacement,
					   displacement, fp, self->stats.total);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);

		/* debugging aid: dump the thread's maps for unresolved ips */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}
787
/* Bit indices for hist_entry->filtered: one bit per active filter kind,
 * so an entry is visible only when all bits are clear. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
};
792
793 void hists__filter_by_dso(struct hists *self, const struct dso *dso)
794 {
795 struct rb_node *nd;
796
797 self->nr_entries = self->stats.total = 0;
798 self->max_sym_namelen = 0;
799
800 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
801 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
802
803 if (symbol_conf.exclude_other && !h->parent)
804 continue;
805
806 if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
807 h->filtered |= (1 << HIST_FILTER__DSO);
808 continue;
809 }
810
811 h->filtered &= ~(1 << HIST_FILTER__DSO);
812 if (!h->filtered) {
813 ++self->nr_entries;
814 self->stats.total += h->count;
815 if (h->ms.sym &&
816 self->max_sym_namelen < h->ms.sym->namelen)
817 self->max_sym_namelen = h->ms.sym->namelen;
818 }
819 }
820 }
821
822 void hists__filter_by_thread(struct hists *self, const struct thread *thread)
823 {
824 struct rb_node *nd;
825
826 self->nr_entries = self->stats.total = 0;
827 self->max_sym_namelen = 0;
828
829 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
830 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
831
832 if (thread != NULL && h->thread != thread) {
833 h->filtered |= (1 << HIST_FILTER__THREAD);
834 continue;
835 }
836 h->filtered &= ~(1 << HIST_FILTER__THREAD);
837 if (!h->filtered) {
838 ++self->nr_entries;
839 self->stats.total += h->count;
840 if (h->ms.sym &&
841 self->max_sym_namelen < h->ms.sym->namelen)
842 self->max_sym_namelen = h->ms.sym->namelen;
843 }
844 }
845 }
This page took 0.079656 seconds and 5 git commands to generate.