8 #include <traceevent/event-parse.h>
11 const char default_parent_pattern
[] = "^sys_|^do_page_fault";
12 const char *parent_pattern
= default_parent_pattern
;
13 const char default_sort_order
[] = "comm,dso,symbol";
14 const char default_branch_sort_order
[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order
[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order
[] = "dso,symbol";
17 const char default_diff_sort_order
[] = "dso,symbol";
18 const char default_tracepoint_sort_order
[] = "trace";
19 const char *sort_order
;
20 const char *field_order
;
21 regex_t ignore_callees_regex
;
22 int have_ignore_callees
= 0;
23 int sort__need_collapse
= 0;
24 int sort__has_parent
= 0;
25 int sort__has_sym
= 0;
26 int sort__has_dso
= 0;
27 int sort__has_socket
= 0;
28 int sort__has_thread
= 0;
29 enum sort_mode sort__mode
= SORT_MODE__NORMAL
;
32 * Replaces all occurrences of a char used with the:
34 * -t, --field-separator
36 * option, which uses a special separator character and doesn't pad with spaces,
37 * replacing all occurrences of this separator in symbol names (and other
38 * output) with a '.' character, so that it is the only invalid separator.
40 static int repsep_snprintf(char *bf
, size_t size
, const char *fmt
, ...)
46 n
= vsnprintf(bf
, size
, fmt
, ap
);
47 if (symbol_conf
.field_sep
&& n
> 0) {
51 sep
= strchr(sep
, *symbol_conf
.field_sep
);
64 static int64_t cmp_null(const void *l
, const void *r
)
77 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
79 return right
->thread
->tid
- left
->thread
->tid
;
82 static int hist_entry__thread_snprintf(struct hist_entry
*he
, char *bf
,
83 size_t size
, unsigned int width
)
85 const char *comm
= thread__comm_str(he
->thread
);
87 width
= max(7U, width
) - 6;
88 return repsep_snprintf(bf
, size
, "%5d:%-*.*s", he
->thread
->tid
,
89 width
, width
, comm
?: "");
92 struct sort_entry sort_thread
= {
93 .se_header
= " Pid:Command",
94 .se_cmp
= sort__thread_cmp
,
95 .se_snprintf
= hist_entry__thread_snprintf
,
96 .se_width_idx
= HISTC_THREAD
,
102 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
104 /* Compare the addr that should be unique among comm */
105 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
109 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
111 /* Compare the addr that should be unique among comm */
112 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
116 sort__comm_sort(struct hist_entry
*left
, struct hist_entry
*right
)
118 return strcmp(comm__str(right
->comm
), comm__str(left
->comm
));
121 static int hist_entry__comm_snprintf(struct hist_entry
*he
, char *bf
,
122 size_t size
, unsigned int width
)
124 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, comm__str(he
->comm
));
127 struct sort_entry sort_comm
= {
128 .se_header
= "Command",
129 .se_cmp
= sort__comm_cmp
,
130 .se_collapse
= sort__comm_collapse
,
131 .se_sort
= sort__comm_sort
,
132 .se_snprintf
= hist_entry__comm_snprintf
,
133 .se_width_idx
= HISTC_COMM
,
138 static int64_t _sort__dso_cmp(struct map
*map_l
, struct map
*map_r
)
140 struct dso
*dso_l
= map_l
? map_l
->dso
: NULL
;
141 struct dso
*dso_r
= map_r
? map_r
->dso
: NULL
;
142 const char *dso_name_l
, *dso_name_r
;
144 if (!dso_l
|| !dso_r
)
145 return cmp_null(dso_r
, dso_l
);
148 dso_name_l
= dso_l
->long_name
;
149 dso_name_r
= dso_r
->long_name
;
151 dso_name_l
= dso_l
->short_name
;
152 dso_name_r
= dso_r
->short_name
;
155 return strcmp(dso_name_l
, dso_name_r
);
159 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
161 return _sort__dso_cmp(right
->ms
.map
, left
->ms
.map
);
164 static int _hist_entry__dso_snprintf(struct map
*map
, char *bf
,
165 size_t size
, unsigned int width
)
167 if (map
&& map
->dso
) {
168 const char *dso_name
= !verbose
? map
->dso
->short_name
:
170 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, dso_name
);
173 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "[unknown]");
176 static int hist_entry__dso_snprintf(struct hist_entry
*he
, char *bf
,
177 size_t size
, unsigned int width
)
179 return _hist_entry__dso_snprintf(he
->ms
.map
, bf
, size
, width
);
182 struct sort_entry sort_dso
= {
183 .se_header
= "Shared Object",
184 .se_cmp
= sort__dso_cmp
,
185 .se_snprintf
= hist_entry__dso_snprintf
,
186 .se_width_idx
= HISTC_DSO
,
191 static int64_t _sort__addr_cmp(u64 left_ip
, u64 right_ip
)
193 return (int64_t)(right_ip
- left_ip
);
196 static int64_t _sort__sym_cmp(struct symbol
*sym_l
, struct symbol
*sym_r
)
198 if (!sym_l
|| !sym_r
)
199 return cmp_null(sym_l
, sym_r
);
204 if (sym_l
->start
!= sym_r
->start
)
205 return (int64_t)(sym_r
->start
- sym_l
->start
);
207 return (int64_t)(sym_r
->end
- sym_l
->end
);
211 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
215 if (!left
->ms
.sym
&& !right
->ms
.sym
)
216 return _sort__addr_cmp(left
->ip
, right
->ip
);
219 * comparing symbol address alone is not enough since it's a
220 * relative address within a dso.
222 if (!sort__has_dso
) {
223 ret
= sort__dso_cmp(left
, right
);
228 return _sort__sym_cmp(left
->ms
.sym
, right
->ms
.sym
);
232 sort__sym_sort(struct hist_entry
*left
, struct hist_entry
*right
)
234 if (!left
->ms
.sym
|| !right
->ms
.sym
)
235 return cmp_null(left
->ms
.sym
, right
->ms
.sym
);
237 return strcmp(right
->ms
.sym
->name
, left
->ms
.sym
->name
);
240 static int _hist_entry__sym_snprintf(struct map
*map
, struct symbol
*sym
,
241 u64 ip
, char level
, char *bf
, size_t size
,
247 char o
= map
? dso__symtab_origin(map
->dso
) : '!';
248 ret
+= repsep_snprintf(bf
, size
, "%-#*llx %c ",
249 BITS_PER_LONG
/ 4 + 2, ip
, o
);
252 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "[%c] ", level
);
254 if (map
->type
== MAP__VARIABLE
) {
255 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%s", sym
->name
);
256 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "+0x%llx",
257 ip
- map
->unmap_ip(map
, sym
->start
));
259 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%.*s",
264 size_t len
= BITS_PER_LONG
/ 4;
265 ret
+= repsep_snprintf(bf
+ ret
, size
- ret
, "%-#.*llx",
272 static int hist_entry__sym_snprintf(struct hist_entry
*he
, char *bf
,
273 size_t size
, unsigned int width
)
275 return _hist_entry__sym_snprintf(he
->ms
.map
, he
->ms
.sym
, he
->ip
,
276 he
->level
, bf
, size
, width
);
279 struct sort_entry sort_sym
= {
280 .se_header
= "Symbol",
281 .se_cmp
= sort__sym_cmp
,
282 .se_sort
= sort__sym_sort
,
283 .se_snprintf
= hist_entry__sym_snprintf
,
284 .se_width_idx
= HISTC_SYMBOL
,
289 static char *hist_entry__get_srcline(struct hist_entry
*he
)
291 struct map
*map
= he
->ms
.map
;
294 return SRCLINE_UNKNOWN
;
296 return get_srcline(map
->dso
, map__rip_2objdump(map
, he
->ip
),
301 sort__srcline_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
304 left
->srcline
= hist_entry__get_srcline(left
);
306 right
->srcline
= hist_entry__get_srcline(right
);
308 return strcmp(right
->srcline
, left
->srcline
);
311 static int hist_entry__srcline_snprintf(struct hist_entry
*he
, char *bf
,
312 size_t size
, unsigned int width
)
315 he
->srcline
= hist_entry__get_srcline(he
);
317 return repsep_snprintf(bf
, size
, "%-.*s", width
, he
->srcline
);
320 struct sort_entry sort_srcline
= {
321 .se_header
= "Source:Line",
322 .se_cmp
= sort__srcline_cmp
,
323 .se_snprintf
= hist_entry__srcline_snprintf
,
324 .se_width_idx
= HISTC_SRCLINE
,
329 static char no_srcfile
[1];
331 static char *hist_entry__get_srcfile(struct hist_entry
*e
)
334 struct map
*map
= e
->ms
.map
;
339 sf
= __get_srcline(map
->dso
, map__rip_2objdump(map
, e
->ip
),
340 e
->ms
.sym
, false, true);
341 if (!strcmp(sf
, SRCLINE_UNKNOWN
))
353 sort__srcfile_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
356 left
->srcfile
= hist_entry__get_srcfile(left
);
358 right
->srcfile
= hist_entry__get_srcfile(right
);
360 return strcmp(right
->srcfile
, left
->srcfile
);
363 static int hist_entry__srcfile_snprintf(struct hist_entry
*he
, char *bf
,
364 size_t size
, unsigned int width
)
367 he
->srcfile
= hist_entry__get_srcfile(he
);
369 return repsep_snprintf(bf
, size
, "%-.*s", width
, he
->srcfile
);
372 struct sort_entry sort_srcfile
= {
373 .se_header
= "Source File",
374 .se_cmp
= sort__srcfile_cmp
,
375 .se_snprintf
= hist_entry__srcfile_snprintf
,
376 .se_width_idx
= HISTC_SRCFILE
,
382 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
384 struct symbol
*sym_l
= left
->parent
;
385 struct symbol
*sym_r
= right
->parent
;
387 if (!sym_l
|| !sym_r
)
388 return cmp_null(sym_l
, sym_r
);
390 return strcmp(sym_r
->name
, sym_l
->name
);
393 static int hist_entry__parent_snprintf(struct hist_entry
*he
, char *bf
,
394 size_t size
, unsigned int width
)
396 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
,
397 he
->parent
? he
->parent
->name
: "[other]");
400 struct sort_entry sort_parent
= {
401 .se_header
= "Parent symbol",
402 .se_cmp
= sort__parent_cmp
,
403 .se_snprintf
= hist_entry__parent_snprintf
,
404 .se_width_idx
= HISTC_PARENT
,
410 sort__cpu_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
412 return right
->cpu
- left
->cpu
;
415 static int hist_entry__cpu_snprintf(struct hist_entry
*he
, char *bf
,
416 size_t size
, unsigned int width
)
418 return repsep_snprintf(bf
, size
, "%*.*d", width
, width
, he
->cpu
);
421 struct sort_entry sort_cpu
= {
423 .se_cmp
= sort__cpu_cmp
,
424 .se_snprintf
= hist_entry__cpu_snprintf
,
425 .se_width_idx
= HISTC_CPU
,
431 sort__socket_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
433 return right
->socket
- left
->socket
;
436 static int hist_entry__socket_snprintf(struct hist_entry
*he
, char *bf
,
437 size_t size
, unsigned int width
)
439 return repsep_snprintf(bf
, size
, "%*.*d", width
, width
-3, he
->socket
);
442 struct sort_entry sort_socket
= {
443 .se_header
= "Socket",
444 .se_cmp
= sort__socket_cmp
,
445 .se_snprintf
= hist_entry__socket_snprintf
,
446 .se_width_idx
= HISTC_SOCKET
,
451 static char *get_trace_output(struct hist_entry
*he
)
453 struct trace_seq seq
;
454 struct perf_evsel
*evsel
;
455 struct pevent_record rec
= {
456 .data
= he
->raw_data
,
457 .size
= he
->raw_size
,
460 evsel
= hists_to_evsel(he
->hists
);
462 trace_seq_init(&seq
);
463 if (symbol_conf
.raw_trace
) {
464 pevent_print_fields(&seq
, he
->raw_data
, he
->raw_size
,
467 pevent_event_info(&seq
, evsel
->tp_format
, &rec
);
473 sort__trace_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
475 struct perf_evsel
*evsel
;
477 evsel
= hists_to_evsel(left
->hists
);
478 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
481 if (left
->trace_output
== NULL
)
482 left
->trace_output
= get_trace_output(left
);
483 if (right
->trace_output
== NULL
)
484 right
->trace_output
= get_trace_output(right
);
486 return strcmp(right
->trace_output
, left
->trace_output
);
489 static int hist_entry__trace_snprintf(struct hist_entry
*he
, char *bf
,
490 size_t size
, unsigned int width
)
492 struct perf_evsel
*evsel
;
494 evsel
= hists_to_evsel(he
->hists
);
495 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
496 return scnprintf(bf
, size
, "%-.*s", width
, "N/A");
498 if (he
->trace_output
== NULL
)
499 he
->trace_output
= get_trace_output(he
);
500 return repsep_snprintf(bf
, size
, "%-.*s", width
, he
->trace_output
);
503 struct sort_entry sort_trace
= {
504 .se_header
= "Trace output",
505 .se_cmp
= sort__trace_cmp
,
506 .se_snprintf
= hist_entry__trace_snprintf
,
507 .se_width_idx
= HISTC_TRACE
,
510 /* sort keys for branch stacks */
513 sort__dso_from_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
515 if (!left
->branch_info
|| !right
->branch_info
)
516 return cmp_null(left
->branch_info
, right
->branch_info
);
518 return _sort__dso_cmp(left
->branch_info
->from
.map
,
519 right
->branch_info
->from
.map
);
522 static int hist_entry__dso_from_snprintf(struct hist_entry
*he
, char *bf
,
523 size_t size
, unsigned int width
)
526 return _hist_entry__dso_snprintf(he
->branch_info
->from
.map
,
529 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
533 sort__dso_to_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
535 if (!left
->branch_info
|| !right
->branch_info
)
536 return cmp_null(left
->branch_info
, right
->branch_info
);
538 return _sort__dso_cmp(left
->branch_info
->to
.map
,
539 right
->branch_info
->to
.map
);
542 static int hist_entry__dso_to_snprintf(struct hist_entry
*he
, char *bf
,
543 size_t size
, unsigned int width
)
546 return _hist_entry__dso_snprintf(he
->branch_info
->to
.map
,
549 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
553 sort__sym_from_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
555 struct addr_map_symbol
*from_l
= &left
->branch_info
->from
;
556 struct addr_map_symbol
*from_r
= &right
->branch_info
->from
;
558 if (!left
->branch_info
|| !right
->branch_info
)
559 return cmp_null(left
->branch_info
, right
->branch_info
);
561 from_l
= &left
->branch_info
->from
;
562 from_r
= &right
->branch_info
->from
;
564 if (!from_l
->sym
&& !from_r
->sym
)
565 return _sort__addr_cmp(from_l
->addr
, from_r
->addr
);
567 return _sort__sym_cmp(from_l
->sym
, from_r
->sym
);
571 sort__sym_to_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
573 struct addr_map_symbol
*to_l
, *to_r
;
575 if (!left
->branch_info
|| !right
->branch_info
)
576 return cmp_null(left
->branch_info
, right
->branch_info
);
578 to_l
= &left
->branch_info
->to
;
579 to_r
= &right
->branch_info
->to
;
581 if (!to_l
->sym
&& !to_r
->sym
)
582 return _sort__addr_cmp(to_l
->addr
, to_r
->addr
);
584 return _sort__sym_cmp(to_l
->sym
, to_r
->sym
);
587 static int hist_entry__sym_from_snprintf(struct hist_entry
*he
, char *bf
,
588 size_t size
, unsigned int width
)
590 if (he
->branch_info
) {
591 struct addr_map_symbol
*from
= &he
->branch_info
->from
;
593 return _hist_entry__sym_snprintf(from
->map
, from
->sym
, from
->addr
,
594 he
->level
, bf
, size
, width
);
597 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
600 static int hist_entry__sym_to_snprintf(struct hist_entry
*he
, char *bf
,
601 size_t size
, unsigned int width
)
603 if (he
->branch_info
) {
604 struct addr_map_symbol
*to
= &he
->branch_info
->to
;
606 return _hist_entry__sym_snprintf(to
->map
, to
->sym
, to
->addr
,
607 he
->level
, bf
, size
, width
);
610 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, "N/A");
613 struct sort_entry sort_dso_from
= {
614 .se_header
= "Source Shared Object",
615 .se_cmp
= sort__dso_from_cmp
,
616 .se_snprintf
= hist_entry__dso_from_snprintf
,
617 .se_width_idx
= HISTC_DSO_FROM
,
620 struct sort_entry sort_dso_to
= {
621 .se_header
= "Target Shared Object",
622 .se_cmp
= sort__dso_to_cmp
,
623 .se_snprintf
= hist_entry__dso_to_snprintf
,
624 .se_width_idx
= HISTC_DSO_TO
,
627 struct sort_entry sort_sym_from
= {
628 .se_header
= "Source Symbol",
629 .se_cmp
= sort__sym_from_cmp
,
630 .se_snprintf
= hist_entry__sym_from_snprintf
,
631 .se_width_idx
= HISTC_SYMBOL_FROM
,
634 struct sort_entry sort_sym_to
= {
635 .se_header
= "Target Symbol",
636 .se_cmp
= sort__sym_to_cmp
,
637 .se_snprintf
= hist_entry__sym_to_snprintf
,
638 .se_width_idx
= HISTC_SYMBOL_TO
,
642 sort__mispredict_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
646 if (!left
->branch_info
|| !right
->branch_info
)
647 return cmp_null(left
->branch_info
, right
->branch_info
);
649 mp
= left
->branch_info
->flags
.mispred
!= right
->branch_info
->flags
.mispred
;
650 p
= left
->branch_info
->flags
.predicted
!= right
->branch_info
->flags
.predicted
;
654 static int hist_entry__mispredict_snprintf(struct hist_entry
*he
, char *bf
,
655 size_t size
, unsigned int width
){
656 static const char *out
= "N/A";
658 if (he
->branch_info
) {
659 if (he
->branch_info
->flags
.predicted
)
661 else if (he
->branch_info
->flags
.mispred
)
665 return repsep_snprintf(bf
, size
, "%-*.*s", width
, width
, out
);
669 sort__cycles_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
671 return left
->branch_info
->flags
.cycles
-
672 right
->branch_info
->flags
.cycles
;
675 static int hist_entry__cycles_snprintf(struct hist_entry
*he
, char *bf
,
676 size_t size
, unsigned int width
)
678 if (he
->branch_info
->flags
.cycles
== 0)
679 return repsep_snprintf(bf
, size
, "%-*s", width
, "-");
680 return repsep_snprintf(bf
, size
, "%-*hd", width
,
681 he
->branch_info
->flags
.cycles
);
684 struct sort_entry sort_cycles
= {
685 .se_header
= "Basic Block Cycles",
686 .se_cmp
= sort__cycles_cmp
,
687 .se_snprintf
= hist_entry__cycles_snprintf
,
688 .se_width_idx
= HISTC_CYCLES
,
691 /* --sort daddr_sym */
693 sort__daddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
695 uint64_t l
= 0, r
= 0;
698 l
= left
->mem_info
->daddr
.addr
;
700 r
= right
->mem_info
->daddr
.addr
;
702 return (int64_t)(r
- l
);
705 static int hist_entry__daddr_snprintf(struct hist_entry
*he
, char *bf
,
706 size_t size
, unsigned int width
)
709 struct map
*map
= NULL
;
710 struct symbol
*sym
= NULL
;
713 addr
= he
->mem_info
->daddr
.addr
;
714 map
= he
->mem_info
->daddr
.map
;
715 sym
= he
->mem_info
->daddr
.sym
;
717 return _hist_entry__sym_snprintf(map
, sym
, addr
, he
->level
, bf
, size
,
722 sort__iaddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
724 uint64_t l
= 0, r
= 0;
727 l
= left
->mem_info
->iaddr
.addr
;
729 r
= right
->mem_info
->iaddr
.addr
;
731 return (int64_t)(r
- l
);
734 static int hist_entry__iaddr_snprintf(struct hist_entry
*he
, char *bf
,
735 size_t size
, unsigned int width
)
738 struct map
*map
= NULL
;
739 struct symbol
*sym
= NULL
;
742 addr
= he
->mem_info
->iaddr
.addr
;
743 map
= he
->mem_info
->iaddr
.map
;
744 sym
= he
->mem_info
->iaddr
.sym
;
746 return _hist_entry__sym_snprintf(map
, sym
, addr
, he
->level
, bf
, size
,
751 sort__dso_daddr_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
753 struct map
*map_l
= NULL
;
754 struct map
*map_r
= NULL
;
757 map_l
= left
->mem_info
->daddr
.map
;
759 map_r
= right
->mem_info
->daddr
.map
;
761 return _sort__dso_cmp(map_l
, map_r
);
764 static int hist_entry__dso_daddr_snprintf(struct hist_entry
*he
, char *bf
,
765 size_t size
, unsigned int width
)
767 struct map
*map
= NULL
;
770 map
= he
->mem_info
->daddr
.map
;
772 return _hist_entry__dso_snprintf(map
, bf
, size
, width
);
776 sort__locked_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
778 union perf_mem_data_src data_src_l
;
779 union perf_mem_data_src data_src_r
;
782 data_src_l
= left
->mem_info
->data_src
;
784 data_src_l
.mem_lock
= PERF_MEM_LOCK_NA
;
787 data_src_r
= right
->mem_info
->data_src
;
789 data_src_r
.mem_lock
= PERF_MEM_LOCK_NA
;
791 return (int64_t)(data_src_r
.mem_lock
- data_src_l
.mem_lock
);
794 static int hist_entry__locked_snprintf(struct hist_entry
*he
, char *bf
,
795 size_t size
, unsigned int width
)
798 u64 mask
= PERF_MEM_LOCK_NA
;
801 mask
= he
->mem_info
->data_src
.mem_lock
;
803 if (mask
& PERF_MEM_LOCK_NA
)
805 else if (mask
& PERF_MEM_LOCK_LOCKED
)
810 return repsep_snprintf(bf
, size
, "%.*s", width
, out
);
814 sort__tlb_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
816 union perf_mem_data_src data_src_l
;
817 union perf_mem_data_src data_src_r
;
820 data_src_l
= left
->mem_info
->data_src
;
822 data_src_l
.mem_dtlb
= PERF_MEM_TLB_NA
;
825 data_src_r
= right
->mem_info
->data_src
;
827 data_src_r
.mem_dtlb
= PERF_MEM_TLB_NA
;
829 return (int64_t)(data_src_r
.mem_dtlb
- data_src_l
.mem_dtlb
);
832 static const char * const tlb_access
[] = {
841 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
843 static int hist_entry__tlb_snprintf(struct hist_entry
*he
, char *bf
,
844 size_t size
, unsigned int width
)
847 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
849 u64 m
= PERF_MEM_TLB_NA
;
855 m
= he
->mem_info
->data_src
.mem_dtlb
;
857 hit
= m
& PERF_MEM_TLB_HIT
;
858 miss
= m
& PERF_MEM_TLB_MISS
;
860 /* already taken care of */
861 m
&= ~(PERF_MEM_TLB_HIT
|PERF_MEM_TLB_MISS
);
863 for (i
= 0; m
&& i
< NUM_TLB_ACCESS
; i
++, m
>>= 1) {
870 strncat(out
, tlb_access
[i
], sz
- l
);
871 l
+= strlen(tlb_access
[i
]);
876 strncat(out
, " hit", sz
- l
);
878 strncat(out
, " miss", sz
- l
);
880 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
884 sort__lvl_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
886 union perf_mem_data_src data_src_l
;
887 union perf_mem_data_src data_src_r
;
890 data_src_l
= left
->mem_info
->data_src
;
892 data_src_l
.mem_lvl
= PERF_MEM_LVL_NA
;
895 data_src_r
= right
->mem_info
->data_src
;
897 data_src_r
.mem_lvl
= PERF_MEM_LVL_NA
;
899 return (int64_t)(data_src_r
.mem_lvl
- data_src_l
.mem_lvl
);
902 static const char * const mem_lvl
[] = {
911 "Remote RAM (1 hop)",
912 "Remote RAM (2 hops)",
913 "Remote Cache (1 hop)",
914 "Remote Cache (2 hops)",
918 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
920 static int hist_entry__lvl_snprintf(struct hist_entry
*he
, char *bf
,
921 size_t size
, unsigned int width
)
924 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
926 u64 m
= PERF_MEM_LVL_NA
;
930 m
= he
->mem_info
->data_src
.mem_lvl
;
934 hit
= m
& PERF_MEM_LVL_HIT
;
935 miss
= m
& PERF_MEM_LVL_MISS
;
937 /* already taken care of */
938 m
&= ~(PERF_MEM_LVL_HIT
|PERF_MEM_LVL_MISS
);
940 for (i
= 0; m
&& i
< NUM_MEM_LVL
; i
++, m
>>= 1) {
947 strncat(out
, mem_lvl
[i
], sz
- l
);
948 l
+= strlen(mem_lvl
[i
]);
953 strncat(out
, " hit", sz
- l
);
955 strncat(out
, " miss", sz
- l
);
957 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
961 sort__snoop_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
963 union perf_mem_data_src data_src_l
;
964 union perf_mem_data_src data_src_r
;
967 data_src_l
= left
->mem_info
->data_src
;
969 data_src_l
.mem_snoop
= PERF_MEM_SNOOP_NA
;
972 data_src_r
= right
->mem_info
->data_src
;
974 data_src_r
.mem_snoop
= PERF_MEM_SNOOP_NA
;
976 return (int64_t)(data_src_r
.mem_snoop
- data_src_l
.mem_snoop
);
979 static const char * const snoop_access
[] = {
986 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
988 static int hist_entry__snoop_snprintf(struct hist_entry
*he
, char *bf
,
989 size_t size
, unsigned int width
)
992 size_t sz
= sizeof(out
) - 1; /* -1 for null termination */
994 u64 m
= PERF_MEM_SNOOP_NA
;
999 m
= he
->mem_info
->data_src
.mem_snoop
;
1001 for (i
= 0; m
&& i
< NUM_SNOOP_ACCESS
; i
++, m
>>= 1) {
1005 strcat(out
, " or ");
1008 strncat(out
, snoop_access
[i
], sz
- l
);
1009 l
+= strlen(snoop_access
[i
]);
1015 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1018 static inline u64
cl_address(u64 address
)
1020 /* return the cacheline of the address */
1021 return (address
& ~(cacheline_size
- 1));
1025 sort__dcacheline_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1028 struct map
*l_map
, *r_map
;
1030 if (!left
->mem_info
) return -1;
1031 if (!right
->mem_info
) return 1;
1033 /* group event types together */
1034 if (left
->cpumode
> right
->cpumode
) return -1;
1035 if (left
->cpumode
< right
->cpumode
) return 1;
1037 l_map
= left
->mem_info
->daddr
.map
;
1038 r_map
= right
->mem_info
->daddr
.map
;
1040 /* if both are NULL, jump to sort on al_addr instead */
1041 if (!l_map
&& !r_map
)
1044 if (!l_map
) return -1;
1045 if (!r_map
) return 1;
1047 if (l_map
->maj
> r_map
->maj
) return -1;
1048 if (l_map
->maj
< r_map
->maj
) return 1;
1050 if (l_map
->min
> r_map
->min
) return -1;
1051 if (l_map
->min
< r_map
->min
) return 1;
1053 if (l_map
->ino
> r_map
->ino
) return -1;
1054 if (l_map
->ino
< r_map
->ino
) return 1;
1056 if (l_map
->ino_generation
> r_map
->ino_generation
) return -1;
1057 if (l_map
->ino_generation
< r_map
->ino_generation
) return 1;
1060 * Addresses with no major/minor numbers are assumed to be
1061 * anonymous in userspace. Sort those on pid then address.
1063 * The kernel and non-zero major/minor mapped areas are
1064 * assumed to be unity mapped. Sort those on address.
1067 if ((left
->cpumode
!= PERF_RECORD_MISC_KERNEL
) &&
1068 (!(l_map
->flags
& MAP_SHARED
)) &&
1069 !l_map
->maj
&& !l_map
->min
&& !l_map
->ino
&&
1070 !l_map
->ino_generation
) {
1071 /* userspace anonymous */
1073 if (left
->thread
->pid_
> right
->thread
->pid_
) return -1;
1074 if (left
->thread
->pid_
< right
->thread
->pid_
) return 1;
1078 /* al_addr does all the right addr - start + offset calculations */
1079 l
= cl_address(left
->mem_info
->daddr
.al_addr
);
1080 r
= cl_address(right
->mem_info
->daddr
.al_addr
);
1082 if (l
> r
) return -1;
1083 if (l
< r
) return 1;
1088 static int hist_entry__dcacheline_snprintf(struct hist_entry
*he
, char *bf
,
1089 size_t size
, unsigned int width
)
1093 struct map
*map
= NULL
;
1094 struct symbol
*sym
= NULL
;
1095 char level
= he
->level
;
1098 addr
= cl_address(he
->mem_info
->daddr
.al_addr
);
1099 map
= he
->mem_info
->daddr
.map
;
1100 sym
= he
->mem_info
->daddr
.sym
;
1102 /* print [s] for shared data mmaps */
1103 if ((he
->cpumode
!= PERF_RECORD_MISC_KERNEL
) &&
1104 map
&& (map
->type
== MAP__VARIABLE
) &&
1105 (map
->flags
& MAP_SHARED
) &&
1106 (map
->maj
|| map
->min
|| map
->ino
||
1107 map
->ino_generation
))
1112 return _hist_entry__sym_snprintf(map
, sym
, addr
, level
, bf
, size
,
1116 struct sort_entry sort_mispredict
= {
1117 .se_header
= "Branch Mispredicted",
1118 .se_cmp
= sort__mispredict_cmp
,
1119 .se_snprintf
= hist_entry__mispredict_snprintf
,
1120 .se_width_idx
= HISTC_MISPREDICT
,
1123 static u64
he_weight(struct hist_entry
*he
)
1125 return he
->stat
.nr_events
? he
->stat
.weight
/ he
->stat
.nr_events
: 0;
1129 sort__local_weight_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1131 return he_weight(left
) - he_weight(right
);
/* Print the per-event (local) weight, left-justified. */
static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}
1140 struct sort_entry sort_local_weight
= {
1141 .se_header
= "Local Weight",
1142 .se_cmp
= sort__local_weight_cmp
,
1143 .se_snprintf
= hist_entry__local_weight_snprintf
,
1144 .se_width_idx
= HISTC_LOCAL_WEIGHT
,
1148 sort__global_weight_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1150 return left
->stat
.weight
- right
->stat
.weight
;
1153 static int hist_entry__global_weight_snprintf(struct hist_entry
*he
, char *bf
,
1154 size_t size
, unsigned int width
)
1156 return repsep_snprintf(bf
, size
, "%-*llu", width
, he
->stat
.weight
);
1159 struct sort_entry sort_global_weight
= {
1160 .se_header
= "Weight",
1161 .se_cmp
= sort__global_weight_cmp
,
1162 .se_snprintf
= hist_entry__global_weight_snprintf
,
1163 .se_width_idx
= HISTC_GLOBAL_WEIGHT
,
1166 struct sort_entry sort_mem_daddr_sym
= {
1167 .se_header
= "Data Symbol",
1168 .se_cmp
= sort__daddr_cmp
,
1169 .se_snprintf
= hist_entry__daddr_snprintf
,
1170 .se_width_idx
= HISTC_MEM_DADDR_SYMBOL
,
1173 struct sort_entry sort_mem_iaddr_sym
= {
1174 .se_header
= "Code Symbol",
1175 .se_cmp
= sort__iaddr_cmp
,
1176 .se_snprintf
= hist_entry__iaddr_snprintf
,
1177 .se_width_idx
= HISTC_MEM_IADDR_SYMBOL
,
1180 struct sort_entry sort_mem_daddr_dso
= {
1181 .se_header
= "Data Object",
1182 .se_cmp
= sort__dso_daddr_cmp
,
1183 .se_snprintf
= hist_entry__dso_daddr_snprintf
,
1184 .se_width_idx
= HISTC_MEM_DADDR_SYMBOL
,
1187 struct sort_entry sort_mem_locked
= {
1188 .se_header
= "Locked",
1189 .se_cmp
= sort__locked_cmp
,
1190 .se_snprintf
= hist_entry__locked_snprintf
,
1191 .se_width_idx
= HISTC_MEM_LOCKED
,
1194 struct sort_entry sort_mem_tlb
= {
1195 .se_header
= "TLB access",
1196 .se_cmp
= sort__tlb_cmp
,
1197 .se_snprintf
= hist_entry__tlb_snprintf
,
1198 .se_width_idx
= HISTC_MEM_TLB
,
1201 struct sort_entry sort_mem_lvl
= {
1202 .se_header
= "Memory access",
1203 .se_cmp
= sort__lvl_cmp
,
1204 .se_snprintf
= hist_entry__lvl_snprintf
,
1205 .se_width_idx
= HISTC_MEM_LVL
,
1208 struct sort_entry sort_mem_snoop
= {
1209 .se_header
= "Snoop",
1210 .se_cmp
= sort__snoop_cmp
,
1211 .se_snprintf
= hist_entry__snoop_snprintf
,
1212 .se_width_idx
= HISTC_MEM_SNOOP
,
1215 struct sort_entry sort_mem_dcacheline
= {
1216 .se_header
= "Data Cacheline",
1217 .se_cmp
= sort__dcacheline_cmp
,
1218 .se_snprintf
= hist_entry__dcacheline_snprintf
,
1219 .se_width_idx
= HISTC_MEM_DCACHELINE
,
1223 sort__abort_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1225 if (!left
->branch_info
|| !right
->branch_info
)
1226 return cmp_null(left
->branch_info
, right
->branch_info
);
1228 return left
->branch_info
->flags
.abort
!=
1229 right
->branch_info
->flags
.abort
;
1232 static int hist_entry__abort_snprintf(struct hist_entry
*he
, char *bf
,
1233 size_t size
, unsigned int width
)
1235 static const char *out
= "N/A";
1237 if (he
->branch_info
) {
1238 if (he
->branch_info
->flags
.abort
)
1244 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1247 struct sort_entry sort_abort
= {
1248 .se_header
= "Transaction abort",
1249 .se_cmp
= sort__abort_cmp
,
1250 .se_snprintf
= hist_entry__abort_snprintf
,
1251 .se_width_idx
= HISTC_ABORT
,
1255 sort__in_tx_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1257 if (!left
->branch_info
|| !right
->branch_info
)
1258 return cmp_null(left
->branch_info
, right
->branch_info
);
1260 return left
->branch_info
->flags
.in_tx
!=
1261 right
->branch_info
->flags
.in_tx
;
1264 static int hist_entry__in_tx_snprintf(struct hist_entry
*he
, char *bf
,
1265 size_t size
, unsigned int width
)
1267 static const char *out
= "N/A";
1269 if (he
->branch_info
) {
1270 if (he
->branch_info
->flags
.in_tx
)
1276 return repsep_snprintf(bf
, size
, "%-*s", width
, out
);
1279 struct sort_entry sort_in_tx
= {
1280 .se_header
= "Branch in transaction",
1281 .se_cmp
= sort__in_tx_cmp
,
1282 .se_snprintf
= hist_entry__in_tx_snprintf
,
1283 .se_width_idx
= HISTC_IN_TX
,
1287 sort__transaction_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
1289 return left
->transaction
- right
->transaction
;
1292 static inline char *add_str(char *p
, const char *str
)
1295 return p
+ strlen(str
);
1298 static struct txbit
{
1303 { PERF_TXN_ELISION
, "EL ", 0 },
1304 { PERF_TXN_TRANSACTION
, "TX ", 1 },
1305 { PERF_TXN_SYNC
, "SYNC ", 1 },
1306 { PERF_TXN_ASYNC
, "ASYNC ", 0 },
1307 { PERF_TXN_RETRY
, "RETRY ", 0 },
1308 { PERF_TXN_CONFLICT
, "CON ", 0 },
1309 { PERF_TXN_CAPACITY_WRITE
, "CAP-WRITE ", 1 },
1310 { PERF_TXN_CAPACITY_READ
, "CAP-READ ", 0 },
1314 int hist_entry__transaction_len(void)
1319 for (i
= 0; txbits
[i
].name
; i
++) {
1320 if (!txbits
[i
].skip_for_len
)
1321 len
+= strlen(txbits
[i
].name
);
1323 len
+= 4; /* :XX<space> */
1327 static int hist_entry__transaction_snprintf(struct hist_entry
*he
, char *bf
,
1328 size_t size
, unsigned int width
)
1330 u64 t
= he
->transaction
;
1336 for (i
= 0; txbits
[i
].name
; i
++)
1337 if (txbits
[i
].flag
& t
)
1338 p
= add_str(p
, txbits
[i
].name
);
1339 if (t
&& !(t
& (PERF_TXN_SYNC
|PERF_TXN_ASYNC
)))
1340 p
= add_str(p
, "NEITHER ");
1341 if (t
& PERF_TXN_ABORT_MASK
) {
1342 sprintf(p
, ":%" PRIx64
,
1343 (t
& PERF_TXN_ABORT_MASK
) >>
1344 PERF_TXN_ABORT_SHIFT
);
1348 return repsep_snprintf(bf
, size
, "%-*s", width
, buf
);
1351 struct sort_entry sort_transaction
= {
1352 .se_header
= "Transaction ",
1353 .se_cmp
= sort__transaction_cmp
,
1354 .se_snprintf
= hist_entry__transaction_snprintf
,
1355 .se_width_idx
= HISTC_TRANSACTION
,
1358 struct sort_dimension
{
1360 struct sort_entry
*entry
;
1364 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1366 static struct sort_dimension common_sort_dimensions
[] = {
1367 DIM(SORT_PID
, "pid", sort_thread
),
1368 DIM(SORT_COMM
, "comm", sort_comm
),
1369 DIM(SORT_DSO
, "dso", sort_dso
),
1370 DIM(SORT_SYM
, "symbol", sort_sym
),
1371 DIM(SORT_PARENT
, "parent", sort_parent
),
1372 DIM(SORT_CPU
, "cpu", sort_cpu
),
1373 DIM(SORT_SOCKET
, "socket", sort_socket
),
1374 DIM(SORT_SRCLINE
, "srcline", sort_srcline
),
1375 DIM(SORT_SRCFILE
, "srcfile", sort_srcfile
),
1376 DIM(SORT_LOCAL_WEIGHT
, "local_weight", sort_local_weight
),
1377 DIM(SORT_GLOBAL_WEIGHT
, "weight", sort_global_weight
),
1378 DIM(SORT_TRANSACTION
, "transaction", sort_transaction
),
1379 DIM(SORT_TRACE
, "trace", sort_trace
),
1384 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1386 static struct sort_dimension bstack_sort_dimensions
[] = {
1387 DIM(SORT_DSO_FROM
, "dso_from", sort_dso_from
),
1388 DIM(SORT_DSO_TO
, "dso_to", sort_dso_to
),
1389 DIM(SORT_SYM_FROM
, "symbol_from", sort_sym_from
),
1390 DIM(SORT_SYM_TO
, "symbol_to", sort_sym_to
),
1391 DIM(SORT_MISPREDICT
, "mispredict", sort_mispredict
),
1392 DIM(SORT_IN_TX
, "in_tx", sort_in_tx
),
1393 DIM(SORT_ABORT
, "abort", sort_abort
),
1394 DIM(SORT_CYCLES
, "cycles", sort_cycles
),
1399 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1401 static struct sort_dimension memory_sort_dimensions
[] = {
1402 DIM(SORT_MEM_DADDR_SYMBOL
, "symbol_daddr", sort_mem_daddr_sym
),
1403 DIM(SORT_MEM_IADDR_SYMBOL
, "symbol_iaddr", sort_mem_iaddr_sym
),
1404 DIM(SORT_MEM_DADDR_DSO
, "dso_daddr", sort_mem_daddr_dso
),
1405 DIM(SORT_MEM_LOCKED
, "locked", sort_mem_locked
),
1406 DIM(SORT_MEM_TLB
, "tlb", sort_mem_tlb
),
1407 DIM(SORT_MEM_LVL
, "mem", sort_mem_lvl
),
1408 DIM(SORT_MEM_SNOOP
, "snoop", sort_mem_snoop
),
1409 DIM(SORT_MEM_DCACHELINE
, "dcacheline", sort_mem_dcacheline
),
1414 struct hpp_dimension
{
1416 struct perf_hpp_fmt
*fmt
;
1420 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1422 static struct hpp_dimension hpp_sort_dimensions
[] = {
1423 DIM(PERF_HPP__OVERHEAD
, "overhead"),
1424 DIM(PERF_HPP__OVERHEAD_SYS
, "overhead_sys"),
1425 DIM(PERF_HPP__OVERHEAD_US
, "overhead_us"),
1426 DIM(PERF_HPP__OVERHEAD_GUEST_SYS
, "overhead_guest_sys"),
1427 DIM(PERF_HPP__OVERHEAD_GUEST_US
, "overhead_guest_us"),
1428 DIM(PERF_HPP__OVERHEAD_ACC
, "overhead_children"),
1429 DIM(PERF_HPP__SAMPLES
, "sample"),
1430 DIM(PERF_HPP__PERIOD
, "period"),
1435 struct hpp_sort_entry
{
1436 struct perf_hpp_fmt hpp
;
1437 struct sort_entry
*se
;
1440 void perf_hpp__reset_sort_width(struct perf_hpp_fmt
*fmt
, struct hists
*hists
)
1442 struct hpp_sort_entry
*hse
;
1444 if (!perf_hpp__is_sort_entry(fmt
))
1447 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1448 hists__new_col_len(hists
, hse
->se
->se_width_idx
, strlen(fmt
->name
));
1451 static int __sort__hpp_header(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1452 struct perf_evsel
*evsel
)
1454 struct hpp_sort_entry
*hse
;
1455 size_t len
= fmt
->user_len
;
1457 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1460 len
= hists__col_len(evsel__hists(evsel
), hse
->se
->se_width_idx
);
1462 return scnprintf(hpp
->buf
, hpp
->size
, "%-*.*s", len
, len
, fmt
->name
);
1465 static int __sort__hpp_width(struct perf_hpp_fmt
*fmt
,
1466 struct perf_hpp
*hpp __maybe_unused
,
1467 struct perf_evsel
*evsel
)
1469 struct hpp_sort_entry
*hse
;
1470 size_t len
= fmt
->user_len
;
1472 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1475 len
= hists__col_len(evsel__hists(evsel
), hse
->se
->se_width_idx
);
1480 static int __sort__hpp_entry(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1481 struct hist_entry
*he
)
1483 struct hpp_sort_entry
*hse
;
1484 size_t len
= fmt
->user_len
;
1486 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1489 len
= hists__col_len(he
->hists
, hse
->se
->se_width_idx
);
1491 return hse
->se
->se_snprintf(he
, hpp
->buf
, hpp
->size
, len
);
1494 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt
*fmt
,
1495 struct hist_entry
*a
, struct hist_entry
*b
)
1497 struct hpp_sort_entry
*hse
;
1499 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1500 return hse
->se
->se_cmp(a
, b
);
1503 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt
*fmt
,
1504 struct hist_entry
*a
, struct hist_entry
*b
)
1506 struct hpp_sort_entry
*hse
;
1507 int64_t (*collapse_fn
)(struct hist_entry
*, struct hist_entry
*);
1509 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1510 collapse_fn
= hse
->se
->se_collapse
?: hse
->se
->se_cmp
;
1511 return collapse_fn(a
, b
);
1514 static int64_t __sort__hpp_sort(struct perf_hpp_fmt
*fmt
,
1515 struct hist_entry
*a
, struct hist_entry
*b
)
1517 struct hpp_sort_entry
*hse
;
1518 int64_t (*sort_fn
)(struct hist_entry
*, struct hist_entry
*);
1520 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1521 sort_fn
= hse
->se
->se_sort
?: hse
->se
->se_cmp
;
1522 return sort_fn(a
, b
);
1525 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt
*format
)
1527 return format
->header
== __sort__hpp_header
;
1530 static bool __sort__hpp_equal(struct perf_hpp_fmt
*a
, struct perf_hpp_fmt
*b
)
1532 struct hpp_sort_entry
*hse_a
;
1533 struct hpp_sort_entry
*hse_b
;
1535 if (!perf_hpp__is_sort_entry(a
) || !perf_hpp__is_sort_entry(b
))
1538 hse_a
= container_of(a
, struct hpp_sort_entry
, hpp
);
1539 hse_b
= container_of(b
, struct hpp_sort_entry
, hpp
);
1541 return hse_a
->se
== hse_b
->se
;
1544 static void hse_free(struct perf_hpp_fmt
*fmt
)
1546 struct hpp_sort_entry
*hse
;
1548 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
1552 static struct hpp_sort_entry
*
1553 __sort_dimension__alloc_hpp(struct sort_dimension
*sd
)
1555 struct hpp_sort_entry
*hse
;
1557 hse
= malloc(sizeof(*hse
));
1559 pr_err("Memory allocation failed\n");
1563 hse
->se
= sd
->entry
;
1564 hse
->hpp
.name
= sd
->entry
->se_header
;
1565 hse
->hpp
.header
= __sort__hpp_header
;
1566 hse
->hpp
.width
= __sort__hpp_width
;
1567 hse
->hpp
.entry
= __sort__hpp_entry
;
1568 hse
->hpp
.color
= NULL
;
1570 hse
->hpp
.cmp
= __sort__hpp_cmp
;
1571 hse
->hpp
.collapse
= __sort__hpp_collapse
;
1572 hse
->hpp
.sort
= __sort__hpp_sort
;
1573 hse
->hpp
.equal
= __sort__hpp_equal
;
1574 hse
->hpp
.free
= hse_free
;
1576 INIT_LIST_HEAD(&hse
->hpp
.list
);
1577 INIT_LIST_HEAD(&hse
->hpp
.sort_list
);
1578 hse
->hpp
.elide
= false;
1580 hse
->hpp
.user_len
= 0;
1585 static void hpp_free(struct perf_hpp_fmt
*fmt
)
1590 static struct perf_hpp_fmt
*__hpp_dimension__alloc_hpp(struct hpp_dimension
*hd
)
1592 struct perf_hpp_fmt
*fmt
;
1594 fmt
= memdup(hd
->fmt
, sizeof(*fmt
));
1596 INIT_LIST_HEAD(&fmt
->list
);
1597 INIT_LIST_HEAD(&fmt
->sort_list
);
1598 fmt
->free
= hpp_free
;
1604 static int __sort_dimension__add_hpp_sort(struct sort_dimension
*sd
)
1606 struct hpp_sort_entry
*hse
= __sort_dimension__alloc_hpp(sd
);
1611 perf_hpp__register_sort_field(&hse
->hpp
);
1615 static int __sort_dimension__add_hpp_output(struct perf_hpp_list
*list
,
1616 struct sort_dimension
*sd
)
1618 struct hpp_sort_entry
*hse
= __sort_dimension__alloc_hpp(sd
);
1623 perf_hpp_list__column_register(list
, &hse
->hpp
);
1627 struct hpp_dynamic_entry
{
1628 struct perf_hpp_fmt hpp
;
1629 struct perf_evsel
*evsel
;
1630 struct format_field
*field
;
1631 unsigned dynamic_len
;
1635 static int hde_width(struct hpp_dynamic_entry
*hde
)
1637 if (!hde
->hpp
.len
) {
1638 int len
= hde
->dynamic_len
;
1639 int namelen
= strlen(hde
->field
->name
);
1640 int fieldlen
= hde
->field
->size
;
1645 if (!(hde
->field
->flags
& FIELD_IS_STRING
)) {
1646 /* length for print hex numbers */
1647 fieldlen
= hde
->field
->size
* 2 + 2;
1654 return hde
->hpp
.len
;
1657 static void update_dynamic_len(struct hpp_dynamic_entry
*hde
,
1658 struct hist_entry
*he
)
1661 struct format_field
*field
= hde
->field
;
1668 /* parse pretty print result and update max length */
1669 if (!he
->trace_output
)
1670 he
->trace_output
= get_trace_output(he
);
1672 namelen
= strlen(field
->name
);
1673 str
= he
->trace_output
;
1676 pos
= strchr(str
, ' ');
1679 pos
= str
+ strlen(str
);
1682 if (!strncmp(str
, field
->name
, namelen
)) {
1688 if (len
> hde
->dynamic_len
)
1689 hde
->dynamic_len
= len
;
1700 static int __sort__hde_header(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1701 struct perf_evsel
*evsel __maybe_unused
)
1703 struct hpp_dynamic_entry
*hde
;
1704 size_t len
= fmt
->user_len
;
1706 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1709 len
= hde_width(hde
);
1711 return scnprintf(hpp
->buf
, hpp
->size
, "%*.*s", len
, len
, hde
->field
->name
);
1714 static int __sort__hde_width(struct perf_hpp_fmt
*fmt
,
1715 struct perf_hpp
*hpp __maybe_unused
,
1716 struct perf_evsel
*evsel __maybe_unused
)
1718 struct hpp_dynamic_entry
*hde
;
1719 size_t len
= fmt
->user_len
;
1721 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1724 len
= hde_width(hde
);
1729 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt
*fmt
, struct hists
*hists
)
1731 struct hpp_dynamic_entry
*hde
;
1733 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1735 return hists_to_evsel(hists
) == hde
->evsel
;
1738 static int __sort__hde_entry(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
1739 struct hist_entry
*he
)
1741 struct hpp_dynamic_entry
*hde
;
1742 size_t len
= fmt
->user_len
;
1744 struct format_field
*field
;
1749 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1752 len
= hde_width(hde
);
1758 namelen
= strlen(field
->name
);
1759 str
= he
->trace_output
;
1762 pos
= strchr(str
, ' ');
1765 pos
= str
+ strlen(str
);
1768 if (!strncmp(str
, field
->name
, namelen
)) {
1770 str
= strndup(str
, pos
- str
);
1773 return scnprintf(hpp
->buf
, hpp
->size
,
1774 "%*.*s", len
, len
, "ERROR");
1785 struct trace_seq seq
;
1787 trace_seq_init(&seq
);
1788 pevent_print_field(&seq
, he
->raw_data
, hde
->field
);
1792 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%*.*s", len
, len
, str
);
1797 static int64_t __sort__hde_cmp(struct perf_hpp_fmt
*fmt
,
1798 struct hist_entry
*a
, struct hist_entry
*b
)
1800 struct hpp_dynamic_entry
*hde
;
1801 struct format_field
*field
;
1802 unsigned offset
, size
;
1804 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1807 if (field
->flags
& FIELD_IS_DYNAMIC
) {
1808 unsigned long long dyn
;
1810 pevent_read_number_field(field
, a
->raw_data
, &dyn
);
1811 offset
= dyn
& 0xffff;
1812 size
= (dyn
>> 16) & 0xffff;
1814 /* record max width for output */
1815 if (size
> hde
->dynamic_len
)
1816 hde
->dynamic_len
= size
;
1818 offset
= field
->offset
;
1821 update_dynamic_len(hde
, a
);
1822 update_dynamic_len(hde
, b
);
1825 return memcmp(a
->raw_data
+ offset
, b
->raw_data
+ offset
, size
);
1828 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt
*fmt
)
1830 return fmt
->cmp
== __sort__hde_cmp
;
1833 static bool __sort__hde_equal(struct perf_hpp_fmt
*a
, struct perf_hpp_fmt
*b
)
1835 struct hpp_dynamic_entry
*hde_a
;
1836 struct hpp_dynamic_entry
*hde_b
;
1838 if (!perf_hpp__is_dynamic_entry(a
) || !perf_hpp__is_dynamic_entry(b
))
1841 hde_a
= container_of(a
, struct hpp_dynamic_entry
, hpp
);
1842 hde_b
= container_of(b
, struct hpp_dynamic_entry
, hpp
);
1844 return hde_a
->field
== hde_b
->field
;
1847 static void hde_free(struct perf_hpp_fmt
*fmt
)
1849 struct hpp_dynamic_entry
*hde
;
1851 hde
= container_of(fmt
, struct hpp_dynamic_entry
, hpp
);
1855 static struct hpp_dynamic_entry
*
1856 __alloc_dynamic_entry(struct perf_evsel
*evsel
, struct format_field
*field
)
1858 struct hpp_dynamic_entry
*hde
;
1860 hde
= malloc(sizeof(*hde
));
1862 pr_debug("Memory allocation failed\n");
1868 hde
->dynamic_len
= 0;
1870 hde
->hpp
.name
= field
->name
;
1871 hde
->hpp
.header
= __sort__hde_header
;
1872 hde
->hpp
.width
= __sort__hde_width
;
1873 hde
->hpp
.entry
= __sort__hde_entry
;
1874 hde
->hpp
.color
= NULL
;
1876 hde
->hpp
.cmp
= __sort__hde_cmp
;
1877 hde
->hpp
.collapse
= __sort__hde_cmp
;
1878 hde
->hpp
.sort
= __sort__hde_cmp
;
1879 hde
->hpp
.equal
= __sort__hde_equal
;
1880 hde
->hpp
.free
= hde_free
;
1882 INIT_LIST_HEAD(&hde
->hpp
.list
);
1883 INIT_LIST_HEAD(&hde
->hpp
.sort_list
);
1884 hde
->hpp
.elide
= false;
1886 hde
->hpp
.user_len
= 0;
1891 static int parse_field_name(char *str
, char **event
, char **field
, char **opt
)
1893 char *event_name
, *field_name
, *opt_name
;
1896 field_name
= strchr(str
, '.');
1899 *field_name
++ = '\0';
1905 opt_name
= strchr(field_name
, '/');
1909 *event
= event_name
;
1910 *field
= field_name
;
1916 /* find match evsel using a given event name. The event name can be:
1917 * 1. '%' + event index (e.g. '%1' for first event)
1918 * 2. full event name (e.g. sched:sched_switch)
1919 * 3. partial event name (should not contain ':')
1921 static struct perf_evsel
*find_evsel(struct perf_evlist
*evlist
, char *event_name
)
1923 struct perf_evsel
*evsel
= NULL
;
1924 struct perf_evsel
*pos
;
1928 if (event_name
[0] == '%') {
1929 int nr
= strtol(event_name
+1, NULL
, 0);
1931 if (nr
> evlist
->nr_entries
)
1934 evsel
= perf_evlist__first(evlist
);
1936 evsel
= perf_evsel__next(evsel
);
1941 full_name
= !!strchr(event_name
, ':');
1942 evlist__for_each(evlist
, pos
) {
1944 if (full_name
&& !strcmp(pos
->name
, event_name
))
1947 if (!full_name
&& strstr(pos
->name
, event_name
)) {
1949 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1950 event_name
, evsel
->name
, pos
->name
);
1960 static int __dynamic_dimension__add(struct perf_evsel
*evsel
,
1961 struct format_field
*field
,
1964 struct hpp_dynamic_entry
*hde
;
1966 hde
= __alloc_dynamic_entry(evsel
, field
);
1970 hde
->raw_trace
= raw_trace
;
1972 perf_hpp__register_sort_field(&hde
->hpp
);
1976 static int add_evsel_fields(struct perf_evsel
*evsel
, bool raw_trace
)
1979 struct format_field
*field
;
1981 field
= evsel
->tp_format
->format
.fields
;
1983 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
1987 field
= field
->next
;
1992 static int add_all_dynamic_fields(struct perf_evlist
*evlist
, bool raw_trace
)
1995 struct perf_evsel
*evsel
;
1997 evlist__for_each(evlist
, evsel
) {
1998 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
2001 ret
= add_evsel_fields(evsel
, raw_trace
);
2008 static int add_all_matching_fields(struct perf_evlist
*evlist
,
2009 char *field_name
, bool raw_trace
)
2012 struct perf_evsel
*evsel
;
2013 struct format_field
*field
;
2015 evlist__for_each(evlist
, evsel
) {
2016 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
2019 field
= pevent_find_any_field(evsel
->tp_format
, field_name
);
2023 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
2030 static int add_dynamic_entry(struct perf_evlist
*evlist
, const char *tok
)
2032 char *str
, *event_name
, *field_name
, *opt_name
;
2033 struct perf_evsel
*evsel
;
2034 struct format_field
*field
;
2035 bool raw_trace
= symbol_conf
.raw_trace
;
2045 if (parse_field_name(str
, &event_name
, &field_name
, &opt_name
) < 0) {
2051 if (strcmp(opt_name
, "raw")) {
2052 pr_debug("unsupported field option %s\n", opt_name
);
2059 if (!strcmp(field_name
, "trace_fields")) {
2060 ret
= add_all_dynamic_fields(evlist
, raw_trace
);
2064 if (event_name
== NULL
) {
2065 ret
= add_all_matching_fields(evlist
, field_name
, raw_trace
);
2069 evsel
= find_evsel(evlist
, event_name
);
2070 if (evsel
== NULL
) {
2071 pr_debug("Cannot find event: %s\n", event_name
);
2076 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
) {
2077 pr_debug("%s is not a tracepoint event\n", event_name
);
2082 if (!strcmp(field_name
, "*")) {
2083 ret
= add_evsel_fields(evsel
, raw_trace
);
2085 field
= pevent_find_any_field(evsel
->tp_format
, field_name
);
2086 if (field
== NULL
) {
2087 pr_debug("Cannot find event field for %s.%s\n",
2088 event_name
, field_name
);
2092 ret
= __dynamic_dimension__add(evsel
, field
, raw_trace
);
2100 static int __sort_dimension__add(struct sort_dimension
*sd
)
2105 if (__sort_dimension__add_hpp_sort(sd
) < 0)
2108 if (sd
->entry
->se_collapse
)
2109 sort__need_collapse
= 1;
2116 static int __hpp_dimension__add(struct hpp_dimension
*hd
)
2118 struct perf_hpp_fmt
*fmt
;
2123 fmt
= __hpp_dimension__alloc_hpp(hd
);
2128 perf_hpp__register_sort_field(fmt
);
2132 static int __sort_dimension__add_output(struct perf_hpp_list
*list
,
2133 struct sort_dimension
*sd
)
2138 if (__sort_dimension__add_hpp_output(list
, sd
) < 0)
2145 static int __hpp_dimension__add_output(struct perf_hpp_list
*list
,
2146 struct hpp_dimension
*hd
)
2148 struct perf_hpp_fmt
*fmt
;
2153 fmt
= __hpp_dimension__alloc_hpp(hd
);
2158 perf_hpp_list__column_register(list
, fmt
);
2162 int hpp_dimension__add_output(unsigned col
)
2164 BUG_ON(col
>= PERF_HPP__MAX_INDEX
);
2165 return __hpp_dimension__add_output(&perf_hpp_list
, &hpp_sort_dimensions
[col
]);
2168 static int sort_dimension__add(const char *tok
,
2169 struct perf_evlist
*evlist __maybe_unused
)
2173 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++) {
2174 struct sort_dimension
*sd
= &common_sort_dimensions
[i
];
2176 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2179 if (sd
->entry
== &sort_parent
) {
2180 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
2184 regerror(ret
, &parent_regex
, err
, sizeof(err
));
2185 pr_err("Invalid regex: %s\n%s", parent_pattern
, err
);
2188 sort__has_parent
= 1;
2189 } else if (sd
->entry
== &sort_sym
) {
2192 * perf diff displays the performance difference amongst
2193 * two or more perf.data files. Those files could come
2194 * from different binaries. So we should not compare
2195 * their ips, but the name of symbol.
2197 if (sort__mode
== SORT_MODE__DIFF
)
2198 sd
->entry
->se_collapse
= sort__sym_sort
;
2200 } else if (sd
->entry
== &sort_dso
) {
2202 } else if (sd
->entry
== &sort_socket
) {
2203 sort__has_socket
= 1;
2204 } else if (sd
->entry
== &sort_thread
) {
2205 sort__has_thread
= 1;
2208 return __sort_dimension__add(sd
);
2211 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++) {
2212 struct hpp_dimension
*hd
= &hpp_sort_dimensions
[i
];
2214 if (strncasecmp(tok
, hd
->name
, strlen(tok
)))
2217 return __hpp_dimension__add(hd
);
2220 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++) {
2221 struct sort_dimension
*sd
= &bstack_sort_dimensions
[i
];
2223 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2226 if (sort__mode
!= SORT_MODE__BRANCH
)
2229 if (sd
->entry
== &sort_sym_from
|| sd
->entry
== &sort_sym_to
)
2232 __sort_dimension__add(sd
);
2236 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++) {
2237 struct sort_dimension
*sd
= &memory_sort_dimensions
[i
];
2239 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2242 if (sort__mode
!= SORT_MODE__MEMORY
)
2245 if (sd
->entry
== &sort_mem_daddr_sym
)
2248 __sort_dimension__add(sd
);
2252 if (!add_dynamic_entry(evlist
, tok
))
2258 static int setup_sort_list(char *str
, struct perf_evlist
*evlist
)
2263 for (tok
= strtok_r(str
, ", ", &tmp
);
2264 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2265 ret
= sort_dimension__add(tok
, evlist
);
2266 if (ret
== -EINVAL
) {
2267 error("Invalid --sort key: `%s'", tok
);
2269 } else if (ret
== -ESRCH
) {
2270 error("Unknown --sort key: `%s'", tok
);
2278 static const char *get_default_sort_order(struct perf_evlist
*evlist
)
2280 const char *default_sort_orders
[] = {
2282 default_branch_sort_order
,
2283 default_mem_sort_order
,
2284 default_top_sort_order
,
2285 default_diff_sort_order
,
2286 default_tracepoint_sort_order
,
2288 bool use_trace
= true;
2289 struct perf_evsel
*evsel
;
2291 BUG_ON(sort__mode
>= ARRAY_SIZE(default_sort_orders
));
2296 evlist__for_each(evlist
, evsel
) {
2297 if (evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
) {
2304 sort__mode
= SORT_MODE__TRACEPOINT
;
2305 if (symbol_conf
.raw_trace
)
2306 return "trace_fields";
2309 return default_sort_orders
[sort__mode
];
2312 static int setup_sort_order(struct perf_evlist
*evlist
)
2314 char *new_sort_order
;
2317 * Append '+'-prefixed sort order to the default sort
2320 if (!sort_order
|| is_strict_order(sort_order
))
2323 if (sort_order
[1] == '\0') {
2324 error("Invalid --sort key: `+'");
2329 * We allocate new sort_order string, but we never free it,
2330 * because it's checked over the rest of the code.
2332 if (asprintf(&new_sort_order
, "%s,%s",
2333 get_default_sort_order(evlist
), sort_order
+ 1) < 0) {
2334 error("Not enough memory to set up --sort");
2338 sort_order
= new_sort_order
;
2343 * Adds 'pre,' prefix into 'str' if 'pre' is
2344 * not already part of 'str'.
2346 static char *prefix_if_not_in(const char *pre
, char *str
)
2350 if (!str
|| strstr(str
, pre
))
2353 if (asprintf(&n
, "%s,%s", pre
, str
) < 0)
2360 static char *setup_overhead(char *keys
)
2362 keys
= prefix_if_not_in("overhead", keys
);
2364 if (symbol_conf
.cumulate_callchain
)
2365 keys
= prefix_if_not_in("overhead_children", keys
);
2370 static int __setup_sorting(struct perf_evlist
*evlist
)
2373 const char *sort_keys
;
2376 ret
= setup_sort_order(evlist
);
2380 sort_keys
= sort_order
;
2381 if (sort_keys
== NULL
) {
2382 if (is_strict_order(field_order
)) {
2384 * If user specified field order but no sort order,
2385 * we'll honor it and not add default sort orders.
2390 sort_keys
= get_default_sort_order(evlist
);
2393 str
= strdup(sort_keys
);
2395 error("Not enough memory to setup sort keys");
2400 * Prepend overhead fields for backward compatibility.
2402 if (!is_strict_order(field_order
)) {
2403 str
= setup_overhead(str
);
2405 error("Not enough memory to setup overhead keys");
2410 ret
= setup_sort_list(str
, evlist
);
2416 void perf_hpp__set_elide(int idx
, bool elide
)
2418 struct perf_hpp_fmt
*fmt
;
2419 struct hpp_sort_entry
*hse
;
2421 perf_hpp_list__for_each_format(&perf_hpp_list
, fmt
) {
2422 if (!perf_hpp__is_sort_entry(fmt
))
2425 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
2426 if (hse
->se
->se_width_idx
== idx
) {
2433 static bool __get_elide(struct strlist
*list
, const char *list_name
, FILE *fp
)
2435 if (list
&& strlist__nr_entries(list
) == 1) {
2437 fprintf(fp
, "# %s: %s\n", list_name
,
2438 strlist__entry(list
, 0)->s
);
2444 static bool get_elide(int idx
, FILE *output
)
2448 return __get_elide(symbol_conf
.sym_list
, "symbol", output
);
2450 return __get_elide(symbol_conf
.dso_list
, "dso", output
);
2452 return __get_elide(symbol_conf
.comm_list
, "comm", output
);
2457 if (sort__mode
!= SORT_MODE__BRANCH
)
2461 case HISTC_SYMBOL_FROM
:
2462 return __get_elide(symbol_conf
.sym_from_list
, "sym_from", output
);
2463 case HISTC_SYMBOL_TO
:
2464 return __get_elide(symbol_conf
.sym_to_list
, "sym_to", output
);
2465 case HISTC_DSO_FROM
:
2466 return __get_elide(symbol_conf
.dso_from_list
, "dso_from", output
);
2468 return __get_elide(symbol_conf
.dso_to_list
, "dso_to", output
);
2476 void sort__setup_elide(FILE *output
)
2478 struct perf_hpp_fmt
*fmt
;
2479 struct hpp_sort_entry
*hse
;
2481 perf_hpp_list__for_each_format(&perf_hpp_list
, fmt
) {
2482 if (!perf_hpp__is_sort_entry(fmt
))
2485 hse
= container_of(fmt
, struct hpp_sort_entry
, hpp
);
2486 fmt
->elide
= get_elide(hse
->se
->se_width_idx
, output
);
2490 * It makes no sense to elide all of the sort entries.
2491 * Just revert them so they show up again.
2493 perf_hpp_list__for_each_format(&perf_hpp_list
, fmt
) {
2494 if (!perf_hpp__is_sort_entry(fmt
))
2501 perf_hpp_list__for_each_format(&perf_hpp_list
, fmt
) {
2502 if (!perf_hpp__is_sort_entry(fmt
))
2509 static int output_field_add(struct perf_hpp_list
*list
, char *tok
)
2513 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++) {
2514 struct sort_dimension
*sd
= &common_sort_dimensions
[i
];
2516 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2519 return __sort_dimension__add_output(list
, sd
);
2522 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++) {
2523 struct hpp_dimension
*hd
= &hpp_sort_dimensions
[i
];
2525 if (strncasecmp(tok
, hd
->name
, strlen(tok
)))
2528 return __hpp_dimension__add_output(list
, hd
);
2531 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++) {
2532 struct sort_dimension
*sd
= &bstack_sort_dimensions
[i
];
2534 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2537 return __sort_dimension__add_output(list
, sd
);
2540 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++) {
2541 struct sort_dimension
*sd
= &memory_sort_dimensions
[i
];
2543 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
2546 return __sort_dimension__add_output(list
, sd
);
2552 static int setup_output_list(struct perf_hpp_list
*list
, char *str
)
2557 for (tok
= strtok_r(str
, ", ", &tmp
);
2558 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2559 ret
= output_field_add(list
, tok
);
2560 if (ret
== -EINVAL
) {
2561 error("Invalid --fields key: `%s'", tok
);
2563 } else if (ret
== -ESRCH
) {
2564 error("Unknown --fields key: `%s'", tok
);
2572 static void reset_dimensions(void)
2576 for (i
= 0; i
< ARRAY_SIZE(common_sort_dimensions
); i
++)
2577 common_sort_dimensions
[i
].taken
= 0;
2579 for (i
= 0; i
< ARRAY_SIZE(hpp_sort_dimensions
); i
++)
2580 hpp_sort_dimensions
[i
].taken
= 0;
2582 for (i
= 0; i
< ARRAY_SIZE(bstack_sort_dimensions
); i
++)
2583 bstack_sort_dimensions
[i
].taken
= 0;
2585 for (i
= 0; i
< ARRAY_SIZE(memory_sort_dimensions
); i
++)
2586 memory_sort_dimensions
[i
].taken
= 0;
/*
 * An order string is "strict" unless it starts with '+', which asks to
 * append keys to the default order instead of replacing it.  A NULL
 * order is not strict either.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return *order != '+';
}
2594 static int __setup_output_field(void)
2599 if (field_order
== NULL
)
2602 strp
= str
= strdup(field_order
);
2604 error("Not enough memory to setup output fields");
2608 if (!is_strict_order(field_order
))
2611 if (!strlen(strp
)) {
2612 error("Invalid --fields key: `+'");
2616 ret
= setup_output_list(&perf_hpp_list
, strp
);
2623 int setup_sorting(struct perf_evlist
*evlist
)
2627 err
= __setup_sorting(evlist
);
2631 if (parent_pattern
!= default_parent_pattern
) {
2632 err
= sort_dimension__add("parent", evlist
);
2640 * perf diff doesn't use default hpp output fields.
2642 if (sort__mode
!= SORT_MODE__DIFF
)
2645 err
= __setup_output_field();
2649 /* copy sort keys to output fields */
2650 perf_hpp__setup_output_field(&perf_hpp_list
);
2651 /* and then copy output fields to sort keys */
2652 perf_hpp__append_sort_keys(&perf_hpp_list
);
2657 void reset_output_field(void)
2659 sort__need_collapse
= 0;
2660 sort__has_parent
= 0;
2668 perf_hpp__reset_output_field(&perf_hpp_list
);