Merge remote-tracking branch 'asoc/topic/sti' into asoc-next
[deliverable/linux.git] / tools / perf / util / sort.c
1#include <sys/mman.h>
2#include "sort.h"
3#include "hist.h"
4#include "comm.h"
5#include "symbol.h"
6#include "evsel.h"
7#include "evlist.h"
8#include <traceevent/event-parse.h>
9#include "mem-events.h"
10
11regex_t parent_regex;
12const char default_parent_pattern[] = "^sys_|^do_page_fault";
13const char *parent_pattern = default_parent_pattern;
14const char default_sort_order[] = "comm,dso,symbol";
15const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
16const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
17const char default_top_sort_order[] = "dso,symbol";
18const char default_diff_sort_order[] = "dso,symbol";
19const char default_tracepoint_sort_order[] = "trace";
20const char *sort_order;
21const char *field_order;
22regex_t ignore_callees_regex;
23int have_ignore_callees = 0;
24int sort__need_collapse = 0;
25int sort__has_parent = 0;
26int sort__has_sym = 0;
27int sort__has_dso = 0;
28int sort__has_socket = 0;
29int sort__has_thread = 0;
30int sort__has_comm = 0;
31enum sort_mode sort__mode = SORT_MODE__NORMAL;
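/*
 * The default_*_sort_order strings above are ordinary --sort specifications;
 * for example "perf report --sort comm,dso,symbol" reproduces
 * default_sort_order. Each comma-separated token names one of the sort
 * dimensions defined later in this file.
 */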
32
33/*
34 * Replaces all occurrences of the character passed to the:
35 *
36 * -t, --field-separator
37 *
38 * option, which uses a special separator character and doesn't pad with
39 * spaces, replacing all occurrences of this separator in symbol names (and
40 * other output) with a '.' character, so that it is the only invalid separator.
41*/
42static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
43{
44 int n;
45 va_list ap;
46
47 va_start(ap, fmt);
48 n = vsnprintf(bf, size, fmt, ap);
49 if (symbol_conf.field_sep && n > 0) {
50 char *sep = bf;
51
52 while (1) {
53 sep = strchr(sep, *symbol_conf.field_sep);
54 if (sep == NULL)
55 break;
56 *sep = '.';
57 }
58 }
59 va_end(ap);
60
61 if (n >= (int)size)
62 return size - 1;
63 return n;
64}
65
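/*
 * Order entries with missing keys consistently: returns 0 when both are NULL,
 * -1 when only the left key is NULL, and 1 otherwise.
 */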
66static int64_t cmp_null(const void *l, const void *r)
67{
68 if (!l && !r)
69 return 0;
70 else if (!l)
71 return -1;
72 else
73 return 1;
74}
75
76/* --sort pid */
77
78static int64_t
79sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
80{
81 return right->thread->tid - left->thread->tid;
82}
83
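/* Print the entry as "<tid>:<comm>", with five columns reserved for the tid. */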
84static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
85 size_t size, unsigned int width)
86{
87 const char *comm = thread__comm_str(he->thread);
88
89 width = max(7U, width) - 6;
90 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
91 width, width, comm ?: "");
92}
93
94static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
95{
96 const struct thread *th = arg;
97
98 if (type != HIST_FILTER__THREAD)
99 return -1;
100
101 return th && he->thread != th;
102}
103
104struct sort_entry sort_thread = {
105 .se_header = " Pid:Command",
106 .se_cmp = sort__thread_cmp,
107 .se_snprintf = hist_entry__thread_snprintf,
108 .se_filter = hist_entry__thread_filter,
109 .se_width_idx = HISTC_THREAD,
110};
111
112/* --sort comm */
113
114static int64_t
115sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
116{
117 /* Compare the comm strings resolved for the two entries */
118 return strcmp(comm__str(right->comm), comm__str(left->comm));
119}
120
121static int64_t
122sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
123{
124 /* Same comparison as above: collapse entries by comm string */
125 return strcmp(comm__str(right->comm), comm__str(left->comm));
126}
127
128static int64_t
129sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
130{
131 return strcmp(comm__str(right->comm), comm__str(left->comm));
132}
133
134static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
135 size_t size, unsigned int width)
136{
137 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
138}
139
140struct sort_entry sort_comm = {
141 .se_header = "Command",
142 .se_cmp = sort__comm_cmp,
143 .se_collapse = sort__comm_collapse,
144 .se_sort = sort__comm_sort,
145 .se_snprintf = hist_entry__comm_snprintf,
146 .se_filter = hist_entry__thread_filter,
147 .se_width_idx = HISTC_COMM,
148};
149
150/* --sort dso */
151
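/*
 * Compare two maps by DSO name: the long (path) name in verbose mode, the
 * short name otherwise. Missing maps or DSOs are ordered via cmp_null().
 */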
152static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
153{
154 struct dso *dso_l = map_l ? map_l->dso : NULL;
155 struct dso *dso_r = map_r ? map_r->dso : NULL;
156 const char *dso_name_l, *dso_name_r;
157
158 if (!dso_l || !dso_r)
159 return cmp_null(dso_r, dso_l);
160
161 if (verbose) {
162 dso_name_l = dso_l->long_name;
163 dso_name_r = dso_r->long_name;
164 } else {
165 dso_name_l = dso_l->short_name;
166 dso_name_r = dso_r->short_name;
167 }
168
169 return strcmp(dso_name_l, dso_name_r);
170}
171
172static int64_t
173sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
174{
175 return _sort__dso_cmp(right->ms.map, left->ms.map);
176}
177
178static int _hist_entry__dso_snprintf(struct map *map, char *bf,
179 size_t size, unsigned int width)
180{
181 if (map && map->dso) {
182 const char *dso_name = !verbose ? map->dso->short_name :
183 map->dso->long_name;
184 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
185 }
186
187 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
188}
189
190static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
191 size_t size, unsigned int width)
192{
193 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
194}
195
196static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
197{
198 const struct dso *dso = arg;
199
200 if (type != HIST_FILTER__DSO)
201 return -1;
202
203 return dso && (!he->ms.map || he->ms.map->dso != dso);
204}
205
206struct sort_entry sort_dso = {
207 .se_header = "Shared Object",
208 .se_cmp = sort__dso_cmp,
209 .se_snprintf = hist_entry__dso_snprintf,
210 .se_filter = hist_entry__dso_filter,
211 .se_width_idx = HISTC_DSO,
212};
213
214/* --sort symbol */
215
216static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
217{
218 return (int64_t)(right_ip - left_ip);
219}
220
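/* Symbols compare by start address first, then by end address. */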
221static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
222{
223 if (!sym_l || !sym_r)
224 return cmp_null(sym_l, sym_r);
225
226 if (sym_l == sym_r)
227 return 0;
228
229 if (sym_l->start != sym_r->start)
230 return (int64_t)(sym_r->start - sym_l->start);
231
232 return (int64_t)(sym_r->end - sym_l->end);
233}
234
235static int64_t
236sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
237{
238 int64_t ret;
239
240 if (!left->ms.sym && !right->ms.sym)
241 return _sort__addr_cmp(left->ip, right->ip);
242
243 /*
244 * comparing symbol address alone is not enough since it's a
245 * relative address within a dso.
246 */
247 if (!sort__has_dso) {
248 ret = sort__dso_cmp(left, right);
249 if (ret != 0)
250 return ret;
251 }
252
253 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
254}
255
256static int64_t
257sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
258{
259 if (!left->ms.sym || !right->ms.sym)
260 return cmp_null(left->ms.sym, right->ms.sym);
261
262 return strcmp(right->ms.sym->name, left->ms.sym->name);
263}
264
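/*
 * Print one symbol column entry. In verbose mode the raw ip and the symtab
 * origin character are prepended; variable (data) symbols get a "+0x<offset>"
 * suffix relative to the symbol start, and unresolved entries fall back to a
 * hex address.
 */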
265static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
266 u64 ip, char level, char *bf, size_t size,
267 unsigned int width)
268{
269 size_t ret = 0;
270
271 if (verbose) {
272 char o = map ? dso__symtab_origin(map->dso) : '!';
273 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
274 BITS_PER_LONG / 4 + 2, ip, o);
275 }
276
277 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
278 if (sym && map) {
279 if (map->type == MAP__VARIABLE) {
280 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
281 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
282 ip - map->unmap_ip(map, sym->start));
283 } else {
284 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
285 width - ret,
286 sym->name);
287 }
288 } else {
289 size_t len = BITS_PER_LONG / 4;
290 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
291 len, ip);
292 }
293
294 return ret;
295}
296
297static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
298 size_t size, unsigned int width)
299{
300 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
301 he->level, bf, size, width);
302}
303
304static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
305{
306 const char *sym = arg;
307
308 if (type != HIST_FILTER__SYMBOL)
309 return -1;
310
311 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
312}
313
314struct sort_entry sort_sym = {
315 .se_header = "Symbol",
316 .se_cmp = sort__sym_cmp,
317 .se_sort = sort__sym_sort,
318 .se_snprintf = hist_entry__sym_snprintf,
319 .se_filter = hist_entry__sym_filter,
320 .se_width_idx = HISTC_SYMBOL,
321};
322
323/* --sort srcline */
324
325static char *hist_entry__get_srcline(struct hist_entry *he)
326{
327 struct map *map = he->ms.map;
328
329 if (!map)
330 return SRCLINE_UNKNOWN;
331
332 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
333 he->ms.sym, true);
334}
335
336static int64_t
337sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
338{
339 if (!left->srcline)
340 left->srcline = hist_entry__get_srcline(left);
341 if (!right->srcline)
342 right->srcline = hist_entry__get_srcline(right);
343
344 return strcmp(right->srcline, left->srcline);
345}
346
347static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
348 size_t size, unsigned int width)
349{
350 if (!he->srcline)
351 he->srcline = hist_entry__get_srcline(he);
352
353 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
354}
355
356struct sort_entry sort_srcline = {
357 .se_header = "Source:Line",
358 .se_cmp = sort__srcline_cmp,
359 .se_snprintf = hist_entry__srcline_snprintf,
360 .se_width_idx = HISTC_SRCLINE,
361};
362
363/* --sort srcfile */
364
365static char no_srcfile[1];
366
367static char *hist_entry__get_srcfile(struct hist_entry *e)
368{
369 char *sf, *p;
370 struct map *map = e->ms.map;
371
372 if (!map)
373 return no_srcfile;
374
375 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
376 e->ms.sym, false, true);
377 if (!strcmp(sf, SRCLINE_UNKNOWN))
378 return no_srcfile;
379 p = strchr(sf, ':');
380 if (p && *sf) {
381 *p = 0;
382 return sf;
383 }
384 free(sf);
385 return no_srcfile;
386}
387
388static int64_t
389sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
390{
391 if (!left->srcfile)
392 left->srcfile = hist_entry__get_srcfile(left);
393 if (!right->srcfile)
394 right->srcfile = hist_entry__get_srcfile(right);
395
396 return strcmp(right->srcfile, left->srcfile);
397}
398
399static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
400 size_t size, unsigned int width)
401{
402 if (!he->srcfile)
403 he->srcfile = hist_entry__get_srcfile(he);
404
405 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
406}
407
408struct sort_entry sort_srcfile = {
409 .se_header = "Source File",
410 .se_cmp = sort__srcfile_cmp,
411 .se_snprintf = hist_entry__srcfile_snprintf,
412 .se_width_idx = HISTC_SRCFILE,
413};
414
415/* --sort parent */
416
417static int64_t
418sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
419{
420 struct symbol *sym_l = left->parent;
421 struct symbol *sym_r = right->parent;
422
423 if (!sym_l || !sym_r)
424 return cmp_null(sym_l, sym_r);
425
426 return strcmp(sym_r->name, sym_l->name);
427}
428
429static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
430 size_t size, unsigned int width)
431{
432 return repsep_snprintf(bf, size, "%-*.*s", width, width,
433 he->parent ? he->parent->name : "[other]");
434}
435
436struct sort_entry sort_parent = {
437 .se_header = "Parent symbol",
438 .se_cmp = sort__parent_cmp,
439 .se_snprintf = hist_entry__parent_snprintf,
440 .se_width_idx = HISTC_PARENT,
441};
442
443/* --sort cpu */
444
445static int64_t
446sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
447{
448 return right->cpu - left->cpu;
449}
450
451static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
452 size_t size, unsigned int width)
453{
454 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
455}
456
457struct sort_entry sort_cpu = {
458 .se_header = "CPU",
459 .se_cmp = sort__cpu_cmp,
460 .se_snprintf = hist_entry__cpu_snprintf,
461 .se_width_idx = HISTC_CPU,
462};
463
464/* --sort socket */
465
466static int64_t
467sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
468{
469 return right->socket - left->socket;
470}
471
472static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
473 size_t size, unsigned int width)
474{
475 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
476}
477
478static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
479{
480 int sk = *(const int *)arg;
481
482 if (type != HIST_FILTER__SOCKET)
483 return -1;
484
485 return sk >= 0 && he->socket != sk;
486}
487
488struct sort_entry sort_socket = {
489 .se_header = "Socket",
490 .se_cmp = sort__socket_cmp,
491 .se_snprintf = hist_entry__socket_snprintf,
492 .se_filter = hist_entry__socket_filter,
493 .se_width_idx = HISTC_SOCKET,
494};
495
496/* --sort trace */
497
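/*
 * Pretty-print the raw tracepoint payload of an entry, either field by field
 * (when symbol_conf.raw_trace is set) or via the event's print format. The
 * returned trace_seq buffer is cached in he->trace_output by the callers.
 */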
498static char *get_trace_output(struct hist_entry *he)
499{
500 struct trace_seq seq;
501 struct perf_evsel *evsel;
502 struct pevent_record rec = {
503 .data = he->raw_data,
504 .size = he->raw_size,
505 };
506
507 evsel = hists_to_evsel(he->hists);
508
509 trace_seq_init(&seq);
510 if (symbol_conf.raw_trace) {
511 pevent_print_fields(&seq, he->raw_data, he->raw_size,
512 evsel->tp_format);
513 } else {
514 pevent_event_info(&seq, evsel->tp_format, &rec);
515 }
516 return seq.buffer;
517}
518
519static int64_t
520sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
521{
522 struct perf_evsel *evsel;
523
524 evsel = hists_to_evsel(left->hists);
525 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
526 return 0;
527
528 if (left->trace_output == NULL)
529 left->trace_output = get_trace_output(left);
530 if (right->trace_output == NULL)
531 right->trace_output = get_trace_output(right);
532
533 return strcmp(right->trace_output, left->trace_output);
534}
535
536static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
537 size_t size, unsigned int width)
538{
539 struct perf_evsel *evsel;
540
541 evsel = hists_to_evsel(he->hists);
542 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
543 return scnprintf(bf, size, "%-.*s", width, "N/A");
544
545 if (he->trace_output == NULL)
546 he->trace_output = get_trace_output(he);
547 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
548}
549
550struct sort_entry sort_trace = {
551 .se_header = "Trace output",
552 .se_cmp = sort__trace_cmp,
553 .se_snprintf = hist_entry__trace_snprintf,
554 .se_width_idx = HISTC_TRACE,
555};
556
557/* sort keys for branch stacks */
558
559static int64_t
560sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
561{
562 if (!left->branch_info || !right->branch_info)
563 return cmp_null(left->branch_info, right->branch_info);
564
565 return _sort__dso_cmp(left->branch_info->from.map,
566 right->branch_info->from.map);
567}
568
569static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
570 size_t size, unsigned int width)
571{
572 if (he->branch_info)
573 return _hist_entry__dso_snprintf(he->branch_info->from.map,
574 bf, size, width);
575 else
576 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
577}
578
579static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
580 const void *arg)
581{
582 const struct dso *dso = arg;
583
584 if (type != HIST_FILTER__DSO)
585 return -1;
586
587 return dso && (!he->branch_info || !he->branch_info->from.map ||
588 he->branch_info->from.map->dso != dso);
589}
590
591static int64_t
592sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
593{
594 if (!left->branch_info || !right->branch_info)
595 return cmp_null(left->branch_info, right->branch_info);
596
597 return _sort__dso_cmp(left->branch_info->to.map,
598 right->branch_info->to.map);
599}
600
601static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
602 size_t size, unsigned int width)
603{
604 if (he->branch_info)
605 return _hist_entry__dso_snprintf(he->branch_info->to.map,
606 bf, size, width);
607 else
608 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
609}
610
611static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
612 const void *arg)
613{
614 const struct dso *dso = arg;
615
616 if (type != HIST_FILTER__DSO)
617 return -1;
618
619 return dso && (!he->branch_info || !he->branch_info->to.map ||
620 he->branch_info->to.map->dso != dso);
621}
622
623static int64_t
624sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
625{
626 struct addr_map_symbol *from_l;
627 struct addr_map_symbol *from_r;
628
629 if (!left->branch_info || !right->branch_info)
630 return cmp_null(left->branch_info, right->branch_info);
631
632 from_l = &left->branch_info->from;
633 from_r = &right->branch_info->from;
634
635 if (!from_l->sym && !from_r->sym)
636 return _sort__addr_cmp(from_l->addr, from_r->addr);
637
638 return _sort__sym_cmp(from_l->sym, from_r->sym);
639}
640
641static int64_t
642sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
643{
644 struct addr_map_symbol *to_l, *to_r;
645
646 if (!left->branch_info || !right->branch_info)
647 return cmp_null(left->branch_info, right->branch_info);
648
649 to_l = &left->branch_info->to;
650 to_r = &right->branch_info->to;
651
652 if (!to_l->sym && !to_r->sym)
653 return _sort__addr_cmp(to_l->addr, to_r->addr);
654
655 return _sort__sym_cmp(to_l->sym, to_r->sym);
656}
657
658static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
659 size_t size, unsigned int width)
660{
661 if (he->branch_info) {
662 struct addr_map_symbol *from = &he->branch_info->from;
663
664 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
665 he->level, bf, size, width);
666 }
667
668 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
669}
670
671static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
672 size_t size, unsigned int width)
673{
674 if (he->branch_info) {
675 struct addr_map_symbol *to = &he->branch_info->to;
676
677 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
678 he->level, bf, size, width);
679 }
680
681 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
682}
683
684static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
685 const void *arg)
686{
687 const char *sym = arg;
688
689 if (type != HIST_FILTER__SYMBOL)
690 return -1;
691
692 return sym && !(he->branch_info && he->branch_info->from.sym &&
693 strstr(he->branch_info->from.sym->name, sym));
694}
695
696static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
697 const void *arg)
698{
699 const char *sym = arg;
700
701 if (type != HIST_FILTER__SYMBOL)
702 return -1;
703
704 return sym && !(he->branch_info && he->branch_info->to.sym &&
705 strstr(he->branch_info->to.sym->name, sym));
706}
707
708struct sort_entry sort_dso_from = {
709 .se_header = "Source Shared Object",
710 .se_cmp = sort__dso_from_cmp,
711 .se_snprintf = hist_entry__dso_from_snprintf,
712 .se_filter = hist_entry__dso_from_filter,
713 .se_width_idx = HISTC_DSO_FROM,
714};
715
716struct sort_entry sort_dso_to = {
717 .se_header = "Target Shared Object",
718 .se_cmp = sort__dso_to_cmp,
719 .se_snprintf = hist_entry__dso_to_snprintf,
720 .se_filter = hist_entry__dso_to_filter,
721 .se_width_idx = HISTC_DSO_TO,
722};
723
724struct sort_entry sort_sym_from = {
725 .se_header = "Source Symbol",
726 .se_cmp = sort__sym_from_cmp,
727 .se_snprintf = hist_entry__sym_from_snprintf,
728 .se_filter = hist_entry__sym_from_filter,
729 .se_width_idx = HISTC_SYMBOL_FROM,
730};
731
732struct sort_entry sort_sym_to = {
733 .se_header = "Target Symbol",
734 .se_cmp = sort__sym_to_cmp,
735 .se_snprintf = hist_entry__sym_to_snprintf,
736 .se_filter = hist_entry__sym_to_filter,
737 .se_width_idx = HISTC_SYMBOL_TO,
738};
739
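/* Treat entries as different if either the mispred or the predicted flag differs. */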
740static int64_t
741sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
742{
743 unsigned char mp, p;
744
745 if (!left->branch_info || !right->branch_info)
746 return cmp_null(left->branch_info, right->branch_info);
747
748 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
749 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
750 return mp || p;
751}
752
753static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
754 size_t size, unsigned int width){
755 static const char *out = "N/A";
756
757 if (he->branch_info) {
758 if (he->branch_info->flags.predicted)
759 out = "N";
760 else if (he->branch_info->flags.mispred)
761 out = "Y";
762 }
763
764 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
765}
766
767static int64_t
768sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
769{
770 return left->branch_info->flags.cycles -
771 right->branch_info->flags.cycles;
772}
773
774static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
775 size_t size, unsigned int width)
776{
777 if (he->branch_info->flags.cycles == 0)
778 return repsep_snprintf(bf, size, "%-*s", width, "-");
779 return repsep_snprintf(bf, size, "%-*hd", width,
780 he->branch_info->flags.cycles);
781}
782
783struct sort_entry sort_cycles = {
784 .se_header = "Basic Block Cycles",
785 .se_cmp = sort__cycles_cmp,
786 .se_snprintf = hist_entry__cycles_snprintf,
787 .se_width_idx = HISTC_CYCLES,
788};
789
790/* --sort daddr_sym */
791static int64_t
792sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
793{
794 uint64_t l = 0, r = 0;
795
796 if (left->mem_info)
797 l = left->mem_info->daddr.addr;
798 if (right->mem_info)
799 r = right->mem_info->daddr.addr;
800
801 return (int64_t)(r - l);
802}
803
804static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
805 size_t size, unsigned int width)
806{
807 uint64_t addr = 0;
808 struct map *map = NULL;
809 struct symbol *sym = NULL;
810
811 if (he->mem_info) {
812 addr = he->mem_info->daddr.addr;
813 map = he->mem_info->daddr.map;
814 sym = he->mem_info->daddr.sym;
815 }
816 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
817 width);
818}
819
820static int64_t
821sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
822{
823 uint64_t l = 0, r = 0;
824
825 if (left->mem_info)
826 l = left->mem_info->iaddr.addr;
827 if (right->mem_info)
828 r = right->mem_info->iaddr.addr;
829
830 return (int64_t)(r - l);
831}
832
833static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
834 size_t size, unsigned int width)
835{
836 uint64_t addr = 0;
837 struct map *map = NULL;
838 struct symbol *sym = NULL;
839
840 if (he->mem_info) {
841 addr = he->mem_info->iaddr.addr;
842 map = he->mem_info->iaddr.map;
843 sym = he->mem_info->iaddr.sym;
844 }
845 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
846 width);
847}
848
849static int64_t
850sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
851{
852 struct map *map_l = NULL;
853 struct map *map_r = NULL;
854
855 if (left->mem_info)
856 map_l = left->mem_info->daddr.map;
857 if (right->mem_info)
858 map_r = right->mem_info->daddr.map;
859
860 return _sort__dso_cmp(map_l, map_r);
861}
862
863static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
864 size_t size, unsigned int width)
865{
866 struct map *map = NULL;
867
868 if (he->mem_info)
869 map = he->mem_info->daddr.map;
870
871 return _hist_entry__dso_snprintf(map, bf, size, width);
872}
873
874static int64_t
875sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
876{
877 union perf_mem_data_src data_src_l;
878 union perf_mem_data_src data_src_r;
879
880 if (left->mem_info)
881 data_src_l = left->mem_info->data_src;
882 else
883 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
884
885 if (right->mem_info)
886 data_src_r = right->mem_info->data_src;
887 else
888 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
889
890 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
891}
892
893static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
894 size_t size, unsigned int width)
895{
896 char out[10];
897
898 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
899 return repsep_snprintf(bf, size, "%.*s", width, out);
900}
901
902static int64_t
903sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
904{
905 union perf_mem_data_src data_src_l;
906 union perf_mem_data_src data_src_r;
907
908 if (left->mem_info)
909 data_src_l = left->mem_info->data_src;
910 else
911 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
912
913 if (right->mem_info)
914 data_src_r = right->mem_info->data_src;
915 else
916 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
917
918 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
919}
920
921static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
922 size_t size, unsigned int width)
923{
924 char out[64];
925
926 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
927 return repsep_snprintf(bf, size, "%-*s", width, out);
928}
929
930static int64_t
931sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
932{
933 union perf_mem_data_src data_src_l;
934 union perf_mem_data_src data_src_r;
935
936 if (left->mem_info)
937 data_src_l = left->mem_info->data_src;
938 else
939 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
940
941 if (right->mem_info)
942 data_src_r = right->mem_info->data_src;
943 else
944 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
945
946 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
947}
948
949static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
950 size_t size, unsigned int width)
951{
952 char out[64];
953
954 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
955 return repsep_snprintf(bf, size, "%-*s", width, out);
956}
957
958static int64_t
959sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
960{
961 union perf_mem_data_src data_src_l;
962 union perf_mem_data_src data_src_r;
963
964 if (left->mem_info)
965 data_src_l = left->mem_info->data_src;
966 else
967 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
968
969 if (right->mem_info)
970 data_src_r = right->mem_info->data_src;
971 else
972 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
973
974 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
975}
976
977static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
978 size_t size, unsigned int width)
979{
980 char out[64];
981
982 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
983 return repsep_snprintf(bf, size, "%-*s", width, out);
984}
985
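/*
 * Group samples that touch the same data cacheline: compare cpumode first,
 * then the backing mapping (maj/min/ino/ino_generation), then pid for
 * anonymous userspace mappings, and finally the cacheline-aligned address.
 */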
986static int64_t
987sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
988{
989 u64 l, r;
990 struct map *l_map, *r_map;
991
992 if (!left->mem_info) return -1;
993 if (!right->mem_info) return 1;
994
995 /* group event types together */
996 if (left->cpumode > right->cpumode) return -1;
997 if (left->cpumode < right->cpumode) return 1;
998
999 l_map = left->mem_info->daddr.map;
1000 r_map = right->mem_info->daddr.map;
1001
1002 /* if both are NULL, jump to sort on al_addr instead */
1003 if (!l_map && !r_map)
1004 goto addr;
1005
1006 if (!l_map) return -1;
1007 if (!r_map) return 1;
1008
1009 if (l_map->maj > r_map->maj) return -1;
1010 if (l_map->maj < r_map->maj) return 1;
1011
1012 if (l_map->min > r_map->min) return -1;
1013 if (l_map->min < r_map->min) return 1;
1014
1015 if (l_map->ino > r_map->ino) return -1;
1016 if (l_map->ino < r_map->ino) return 1;
1017
1018 if (l_map->ino_generation > r_map->ino_generation) return -1;
1019 if (l_map->ino_generation < r_map->ino_generation) return 1;
1020
1021 /*
1022 * Addresses with no major/minor numbers are assumed to be
1023 * anonymous in userspace. Sort those on pid then address.
1024 *
1025 * The kernel and non-zero major/minor mapped areas are
1026 * assumed to be unity mapped. Sort those on address.
1027 */
1028
1029 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1030 (!(l_map->flags & MAP_SHARED)) &&
1031 !l_map->maj && !l_map->min && !l_map->ino &&
1032 !l_map->ino_generation) {
1033 /* userspace anonymous */
1034
1035 if (left->thread->pid_ > right->thread->pid_) return -1;
1036 if (left->thread->pid_ < right->thread->pid_) return 1;
1037 }
1038
1039addr:
1040 /* al_addr does all the right addr - start + offset calculations */
1041 l = cl_address(left->mem_info->daddr.al_addr);
1042 r = cl_address(right->mem_info->daddr.al_addr);
1043
1044 if (l > r) return -1;
1045 if (l < r) return 1;
1046
1047 return 0;
1048}
1049
1050static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1051 size_t size, unsigned int width)
1052{
1053
1054 uint64_t addr = 0;
1055 struct map *map = NULL;
1056 struct symbol *sym = NULL;
1057 char level = he->level;
1058
1059 if (he->mem_info) {
1060 addr = cl_address(he->mem_info->daddr.al_addr);
1061 map = he->mem_info->daddr.map;
1062 sym = he->mem_info->daddr.sym;
1063
1064 /* print [s] for shared data mmaps */
1065 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1066 map && (map->type == MAP__VARIABLE) &&
1067 (map->flags & MAP_SHARED) &&
1068 (map->maj || map->min || map->ino ||
1069 map->ino_generation))
1070 level = 's';
1071 else if (!map)
1072 level = 'X';
1073 }
1074 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1075 width);
1076}
1077
1078struct sort_entry sort_mispredict = {
1079 .se_header = "Branch Mispredicted",
1080 .se_cmp = sort__mispredict_cmp,
1081 .se_snprintf = hist_entry__mispredict_snprintf,
1082 .se_width_idx = HISTC_MISPREDICT,
1083};
1084
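/* Average sample weight of an entry: total weight divided by event count. */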
1085static u64 he_weight(struct hist_entry *he)
1086{
1087 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1088}
1089
1090static int64_t
1091sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1092{
1093 return he_weight(left) - he_weight(right);
1094}
1095
1096static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1097 size_t size, unsigned int width)
1098{
1099 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1100}
1101
1102struct sort_entry sort_local_weight = {
1103 .se_header = "Local Weight",
1104 .se_cmp = sort__local_weight_cmp,
1105 .se_snprintf = hist_entry__local_weight_snprintf,
1106 .se_width_idx = HISTC_LOCAL_WEIGHT,
1107};
1108
1109static int64_t
1110sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1111{
1112 return left->stat.weight - right->stat.weight;
1113}
1114
1115static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1116 size_t size, unsigned int width)
1117{
1118 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1119}
1120
1121struct sort_entry sort_global_weight = {
1122 .se_header = "Weight",
1123 .se_cmp = sort__global_weight_cmp,
1124 .se_snprintf = hist_entry__global_weight_snprintf,
1125 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1126};
1127
1128struct sort_entry sort_mem_daddr_sym = {
1129 .se_header = "Data Symbol",
1130 .se_cmp = sort__daddr_cmp,
1131 .se_snprintf = hist_entry__daddr_snprintf,
1132 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1133};
1134
1135struct sort_entry sort_mem_iaddr_sym = {
1136 .se_header = "Code Symbol",
1137 .se_cmp = sort__iaddr_cmp,
1138 .se_snprintf = hist_entry__iaddr_snprintf,
1139 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1140};
1141
1142struct sort_entry sort_mem_daddr_dso = {
1143 .se_header = "Data Object",
1144 .se_cmp = sort__dso_daddr_cmp,
1145 .se_snprintf = hist_entry__dso_daddr_snprintf,
1146 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1147};
1148
1149struct sort_entry sort_mem_locked = {
1150 .se_header = "Locked",
1151 .se_cmp = sort__locked_cmp,
1152 .se_snprintf = hist_entry__locked_snprintf,
1153 .se_width_idx = HISTC_MEM_LOCKED,
1154};
1155
1156struct sort_entry sort_mem_tlb = {
1157 .se_header = "TLB access",
1158 .se_cmp = sort__tlb_cmp,
1159 .se_snprintf = hist_entry__tlb_snprintf,
1160 .se_width_idx = HISTC_MEM_TLB,
1161};
1162
1163struct sort_entry sort_mem_lvl = {
1164 .se_header = "Memory access",
1165 .se_cmp = sort__lvl_cmp,
1166 .se_snprintf = hist_entry__lvl_snprintf,
1167 .se_width_idx = HISTC_MEM_LVL,
1168};
1169
1170struct sort_entry sort_mem_snoop = {
1171 .se_header = "Snoop",
1172 .se_cmp = sort__snoop_cmp,
1173 .se_snprintf = hist_entry__snoop_snprintf,
1174 .se_width_idx = HISTC_MEM_SNOOP,
1175};
1176
1177struct sort_entry sort_mem_dcacheline = {
1178 .se_header = "Data Cacheline",
1179 .se_cmp = sort__dcacheline_cmp,
1180 .se_snprintf = hist_entry__dcacheline_snprintf,
1181 .se_width_idx = HISTC_MEM_DCACHELINE,
1182};
1183
1184static int64_t
1185sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1186{
1187 if (!left->branch_info || !right->branch_info)
1188 return cmp_null(left->branch_info, right->branch_info);
1189
1190 return left->branch_info->flags.abort !=
1191 right->branch_info->flags.abort;
1192}
1193
1194static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1195 size_t size, unsigned int width)
1196{
1197 static const char *out = "N/A";
1198
1199 if (he->branch_info) {
1200 if (he->branch_info->flags.abort)
1201 out = "A";
1202 else
1203 out = ".";
1204 }
1205
1206 return repsep_snprintf(bf, size, "%-*s", width, out);
1207}
1208
1209struct sort_entry sort_abort = {
1210 .se_header = "Transaction abort",
1211 .se_cmp = sort__abort_cmp,
1212 .se_snprintf = hist_entry__abort_snprintf,
1213 .se_width_idx = HISTC_ABORT,
1214};
1215
1216static int64_t
1217sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1218{
1219 if (!left->branch_info || !right->branch_info)
1220 return cmp_null(left->branch_info, right->branch_info);
1221
1222 return left->branch_info->flags.in_tx !=
1223 right->branch_info->flags.in_tx;
1224}
1225
1226static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1227 size_t size, unsigned int width)
1228{
1229 static const char *out = "N/A";
1230
1231 if (he->branch_info) {
1232 if (he->branch_info->flags.in_tx)
1233 out = "T";
1234 else
1235 out = ".";
1236 }
1237
1238 return repsep_snprintf(bf, size, "%-*s", width, out);
1239}
1240
1241struct sort_entry sort_in_tx = {
1242 .se_header = "Branch in transaction",
1243 .se_cmp = sort__in_tx_cmp,
1244 .se_snprintf = hist_entry__in_tx_snprintf,
1245 .se_width_idx = HISTC_IN_TX,
1246};
1247
1248static int64_t
1249sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1250{
1251 return left->transaction - right->transaction;
1252}
1253
1254static inline char *add_str(char *p, const char *str)
1255{
1256 strcpy(p, str);
1257 return p + strlen(str);
1258}
1259
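/*
 * Map PERF_TXN_* flag bits to the tokens printed in the Transaction column.
 * Entries with skip_for_len set are not counted towards the default column
 * width computed by hist_entry__transaction_len().
 */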
1260static struct txbit {
1261 unsigned flag;
1262 const char *name;
1263 int skip_for_len;
1264} txbits[] = {
1265 { PERF_TXN_ELISION, "EL ", 0 },
1266 { PERF_TXN_TRANSACTION, "TX ", 1 },
1267 { PERF_TXN_SYNC, "SYNC ", 1 },
1268 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1269 { PERF_TXN_RETRY, "RETRY ", 0 },
1270 { PERF_TXN_CONFLICT, "CON ", 0 },
1271 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1272 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1273 { 0, NULL, 0 }
1274};
1275
1276int hist_entry__transaction_len(void)
1277{
1278 int i;
1279 int len = 0;
1280
1281 for (i = 0; txbits[i].name; i++) {
1282 if (!txbits[i].skip_for_len)
1283 len += strlen(txbits[i].name);
1284 }
1285 len += 4; /* :XX<space> */
1286 return len;
1287}
1288
1289static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1290 size_t size, unsigned int width)
1291{
1292 u64 t = he->transaction;
1293 char buf[128];
1294 char *p = buf;
1295 int i;
1296
1297 buf[0] = 0;
1298 for (i = 0; txbits[i].name; i++)
1299 if (txbits[i].flag & t)
1300 p = add_str(p, txbits[i].name);
1301 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1302 p = add_str(p, "NEITHER ");
1303 if (t & PERF_TXN_ABORT_MASK) {
1304 sprintf(p, ":%" PRIx64,
1305 (t & PERF_TXN_ABORT_MASK) >>
1306 PERF_TXN_ABORT_SHIFT);
1307 p += strlen(p);
1308 }
1309
1310 return repsep_snprintf(bf, size, "%-*s", width, buf);
1311}
1312
1313struct sort_entry sort_transaction = {
1314 .se_header = "Transaction ",
1315 .se_cmp = sort__transaction_cmp,
1316 .se_snprintf = hist_entry__transaction_snprintf,
1317 .se_width_idx = HISTC_TRANSACTION,
1318};
1319
1320struct sort_dimension {
1321 const char *name;
1322 struct sort_entry *entry;
1323 int taken;
1324};
1325
1326#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1327
1328static struct sort_dimension common_sort_dimensions[] = {
1329 DIM(SORT_PID, "pid", sort_thread),
1330 DIM(SORT_COMM, "comm", sort_comm),
1331 DIM(SORT_DSO, "dso", sort_dso),
1332 DIM(SORT_SYM, "symbol", sort_sym),
1333 DIM(SORT_PARENT, "parent", sort_parent),
1334 DIM(SORT_CPU, "cpu", sort_cpu),
1335 DIM(SORT_SOCKET, "socket", sort_socket),
1336 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1337 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1338 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1339 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1340 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1341 DIM(SORT_TRACE, "trace", sort_trace),
1342};
1343
1344#undef DIM
1345
1346#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1347
1348static struct sort_dimension bstack_sort_dimensions[] = {
1349 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1350 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1351 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1352 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1353 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1354 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1355 DIM(SORT_ABORT, "abort", sort_abort),
1356 DIM(SORT_CYCLES, "cycles", sort_cycles),
1357};
1358
1359#undef DIM
1360
1361#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1362
1363static struct sort_dimension memory_sort_dimensions[] = {
1364 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1365 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1366 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1367 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1368 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1369 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1370 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1371 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1372};
1373
1374#undef DIM
1375
1376struct hpp_dimension {
1377 const char *name;
1378 struct perf_hpp_fmt *fmt;
1379 int taken;
1380};
1381
1382#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1383
1384static struct hpp_dimension hpp_sort_dimensions[] = {
1385 DIM(PERF_HPP__OVERHEAD, "overhead"),
1386 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1387 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1388 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1389 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1390 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1391 DIM(PERF_HPP__SAMPLES, "sample"),
1392 DIM(PERF_HPP__PERIOD, "period"),
1393};
1394
1395#undef DIM
1396
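/*
 * Adapter wrapping a classic sort_entry in a perf_hpp_fmt, so that sort keys
 * can also be used as hists output columns.
 */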
1397struct hpp_sort_entry {
1398 struct perf_hpp_fmt hpp;
1399 struct sort_entry *se;
1400};
1401
1402void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1403{
1404 struct hpp_sort_entry *hse;
1405
1406 if (!perf_hpp__is_sort_entry(fmt))
1407 return;
1408
1409 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1410 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1411}
1412
1413static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1414 struct perf_evsel *evsel)
1415{
1416 struct hpp_sort_entry *hse;
1417 size_t len = fmt->user_len;
1418
1419 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1420
1421 if (!len)
1422 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1423
1424 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1425}
1426
1427static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1428 struct perf_hpp *hpp __maybe_unused,
1429 struct perf_evsel *evsel)
1430{
1431 struct hpp_sort_entry *hse;
1432 size_t len = fmt->user_len;
1433
1434 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1435
1436 if (!len)
1437 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1438
1439 return len;
1440}
1441
1442static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1443 struct hist_entry *he)
1444{
1445 struct hpp_sort_entry *hse;
1446 size_t len = fmt->user_len;
1447
1448 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1449
1450 if (!len)
1451 len = hists__col_len(he->hists, hse->se->se_width_idx);
1452
1453 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1454}
1455
1456static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1457 struct hist_entry *a, struct hist_entry *b)
1458{
1459 struct hpp_sort_entry *hse;
1460
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1462 return hse->se->se_cmp(a, b);
1463}
1464
1465static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1466 struct hist_entry *a, struct hist_entry *b)
1467{
1468 struct hpp_sort_entry *hse;
1469 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1470
1471 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1472 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1473 return collapse_fn(a, b);
1474}
1475
1476static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1477 struct hist_entry *a, struct hist_entry *b)
1478{
1479 struct hpp_sort_entry *hse;
1480 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1481
1482 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1483 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1484 return sort_fn(a, b);
1485}
1486
1487bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1488{
1489 return format->header == __sort__hpp_header;
1490}
1491
1492#define MK_SORT_ENTRY_CHK(key) \
1493bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1494{ \
1495 struct hpp_sort_entry *hse; \
1496 \
1497 if (!perf_hpp__is_sort_entry(fmt)) \
1498 return false; \
1499 \
1500 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1501 return hse->se == &sort_ ## key ; \
1502}
1503
1504MK_SORT_ENTRY_CHK(trace)
1505MK_SORT_ENTRY_CHK(srcline)
1506MK_SORT_ENTRY_CHK(srcfile)
1507MK_SORT_ENTRY_CHK(thread)
1508MK_SORT_ENTRY_CHK(comm)
1509MK_SORT_ENTRY_CHK(dso)
1510MK_SORT_ENTRY_CHK(sym)
1511
1512
1513static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1514{
1515 struct hpp_sort_entry *hse_a;
1516 struct hpp_sort_entry *hse_b;
1517
1518 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1519 return false;
1520
1521 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1522 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1523
1524 return hse_a->se == hse_b->se;
1525}
1526
1527static void hse_free(struct perf_hpp_fmt *fmt)
1528{
1529 struct hpp_sort_entry *hse;
1530
1531 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1532 free(hse);
1533}
1534
1535static struct hpp_sort_entry *
1536__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1537{
1538 struct hpp_sort_entry *hse;
1539
1540 hse = malloc(sizeof(*hse));
1541 if (hse == NULL) {
1542 pr_err("Memory allocation failed\n");
1543 return NULL;
1544 }
1545
1546 hse->se = sd->entry;
1547 hse->hpp.name = sd->entry->se_header;
1548 hse->hpp.header = __sort__hpp_header;
1549 hse->hpp.width = __sort__hpp_width;
1550 hse->hpp.entry = __sort__hpp_entry;
1551 hse->hpp.color = NULL;
1552
1553 hse->hpp.cmp = __sort__hpp_cmp;
1554 hse->hpp.collapse = __sort__hpp_collapse;
1555 hse->hpp.sort = __sort__hpp_sort;
1556 hse->hpp.equal = __sort__hpp_equal;
1557 hse->hpp.free = hse_free;
1558
1559 INIT_LIST_HEAD(&hse->hpp.list);
1560 INIT_LIST_HEAD(&hse->hpp.sort_list);
1561 hse->hpp.elide = false;
1562 hse->hpp.len = 0;
1563 hse->hpp.user_len = 0;
1564 hse->hpp.level = level;
1565
1566 return hse;
1567}
1568
1569static void hpp_free(struct perf_hpp_fmt *fmt)
1570{
1571 free(fmt);
1572}
1573
1574static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1575 int level)
1576{
1577 struct perf_hpp_fmt *fmt;
1578
1579 fmt = memdup(hd->fmt, sizeof(*fmt));
1580 if (fmt) {
1581 INIT_LIST_HEAD(&fmt->list);
1582 INIT_LIST_HEAD(&fmt->sort_list);
1583 fmt->free = hpp_free;
1584 fmt->level = level;
1585 }
1586
1587 return fmt;
1588}
1589
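/*
 * Apply every se_filter callback registered for this entry's sort keys.
 * Returns -1 if no key handles the given filter type, otherwise the OR of
 * the individual results (non-zero means the entry is filtered out).
 */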
1590int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1591{
1592 struct perf_hpp_fmt *fmt;
1593 struct hpp_sort_entry *hse;
1594 int ret = -1;
1595 int r;
1596
1597 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1598 if (!perf_hpp__is_sort_entry(fmt))
1599 continue;
1600
1601 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1602 if (hse->se->se_filter == NULL)
1603 continue;
1604
1605 /*
1606 * A hist entry is filtered out if any sort key in the hpp list applies
1607 * its filter; filter types a sort key does not handle are skipped.
1608 */
1609 r = hse->se->se_filter(he, type, arg);
1610 if (r >= 0) {
1611 if (ret < 0)
1612 ret = 0;
1613 ret |= r;
1614 }
1615 }
1616
1617 return ret;
1618}
1619
1620static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1621 struct perf_hpp_list *list,
1622 int level)
1623{
1624 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1625
1626 if (hse == NULL)
1627 return -1;
1628
1629 perf_hpp_list__register_sort_field(list, &hse->hpp);
1630 return 0;
1631}
1632
1633static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1634 struct perf_hpp_list *list)
1635{
1636 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1637
1638 if (hse == NULL)
1639 return -1;
1640
1641 perf_hpp_list__column_register(list, &hse->hpp);
1642 return 0;
1643}
1644
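/*
 * Output column backed by a raw tracepoint field (a "dynamic" sort key);
 * dynamic_len tracks the widest value seen so far for column sizing.
 */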
1645struct hpp_dynamic_entry {
1646 struct perf_hpp_fmt hpp;
1647 struct perf_evsel *evsel;
1648 struct format_field *field;
1649 unsigned dynamic_len;
1650 bool raw_trace;
1651};
1652
1653static int hde_width(struct hpp_dynamic_entry *hde)
1654{
1655 if (!hde->hpp.len) {
1656 int len = hde->dynamic_len;
1657 int namelen = strlen(hde->field->name);
1658 int fieldlen = hde->field->size;
1659
1660 if (namelen > len)
1661 len = namelen;
1662
1663 if (!(hde->field->flags & FIELD_IS_STRING)) {
1664 /* length needed to print the field as a hex number */
1665 fieldlen = hde->field->size * 2 + 2;
1666 }
1667 if (fieldlen > len)
1668 len = fieldlen;
1669
1670 hde->hpp.len = len;
1671 }
1672 return hde->hpp.len;
1673}
1674
1675static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1676 struct hist_entry *he)
1677{
1678 char *str, *pos;
1679 struct format_field *field = hde->field;
1680 size_t namelen;
1681 bool last = false;
1682
1683 if (hde->raw_trace)
1684 return;
1685
1686 /* parse pretty print result and update max length */
1687 if (!he->trace_output)
1688 he->trace_output = get_trace_output(he);
1689
1690 namelen = strlen(field->name);
1691 str = he->trace_output;
1692
1693 while (str) {
1694 pos = strchr(str, ' ');
1695 if (pos == NULL) {
1696 last = true;
1697 pos = str + strlen(str);
1698 }
1699
1700 if (!strncmp(str, field->name, namelen)) {
1701 size_t len;
1702
1703 str += namelen + 1;
1704 len = pos - str;
1705
1706 if (len > hde->dynamic_len)
1707 hde->dynamic_len = len;
1708 break;
1709 }
1710
1711 if (last)
1712 str = NULL;
1713 else
1714 str = pos + 1;
1715 }
1716}
1717
1718static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1719 struct perf_evsel *evsel __maybe_unused)
1720{
1721 struct hpp_dynamic_entry *hde;
1722 size_t len = fmt->user_len;
1723
1724 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1725
1726 if (!len)
1727 len = hde_width(hde);
1728
1729 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1730}
1731
1732static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1733 struct perf_hpp *hpp __maybe_unused,
1734 struct perf_evsel *evsel __maybe_unused)
1735{
1736 struct hpp_dynamic_entry *hde;
1737 size_t len = fmt->user_len;
1738
1739 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1740
1741 if (!len)
1742 len = hde_width(hde);
1743
1744 return len;
1745}
1746
1747bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1748{
1749 struct hpp_dynamic_entry *hde;
1750
1751 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1752
1753 return hists_to_evsel(hists) == hde->evsel;
1754}
1755
1756static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1757 struct hist_entry *he)
1758{
1759 struct hpp_dynamic_entry *hde;
1760 size_t len = fmt->user_len;
1761 char *str, *pos;
1762 struct format_field *field;
1763 size_t namelen;
1764 bool last = false;
1765 int ret;
1766
1767 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1768
1769 if (!len)
1770 len = hde_width(hde);
1771
1772 if (hde->raw_trace)
1773 goto raw_field;
1774
1775 if (!he->trace_output)
1776 he->trace_output = get_trace_output(he);
1777
1778 field = hde->field;
1779 namelen = strlen(field->name);
1780 str = he->trace_output;
1781
1782 while (str) {
1783 pos = strchr(str, ' ');
1784 if (pos == NULL) {
1785 last = true;
1786 pos = str + strlen(str);
1787 }
1788
1789 if (!strncmp(str, field->name, namelen)) {
1790 str += namelen + 1;
1791 str = strndup(str, pos - str);
1792
1793 if (str == NULL)
1794 return scnprintf(hpp->buf, hpp->size,
1795 "%*.*s", len, len, "ERROR");
1796 break;
1797 }
1798
1799 if (last)
1800 str = NULL;
1801 else
1802 str = pos + 1;
1803 }
1804
1805 if (str == NULL) {
1806 struct trace_seq seq;
1807raw_field:
1808 trace_seq_init(&seq);
1809 pevent_print_field(&seq, he->raw_data, hde->field);
1810 str = seq.buffer;
1811 }
1812
1813 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1814 free(str);
1815 return ret;
1816}
1817
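/*
 * Compare the raw bytes of the tracepoint field in both entries. For dynamic
 * fields the 16-bit offset and 16-bit size are decoded from the record first;
 * a NULL 'b' is only used to update the cached maximum field width.
 */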
1818static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1819 struct hist_entry *a, struct hist_entry *b)
1820{
1821 struct hpp_dynamic_entry *hde;
1822 struct format_field *field;
1823 unsigned offset, size;
1824
1825 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1826
1827 if (b == NULL) {
1828 update_dynamic_len(hde, a);
1829 return 0;
1830 }
1831
1832 field = hde->field;
1833 if (field->flags & FIELD_IS_DYNAMIC) {
1834 unsigned long long dyn;
1835
1836 pevent_read_number_field(field, a->raw_data, &dyn);
1837 offset = dyn & 0xffff;
1838 size = (dyn >> 16) & 0xffff;
1839
1840 /* record max width for output */
1841 if (size > hde->dynamic_len)
1842 hde->dynamic_len = size;
1843 } else {
1844 offset = field->offset;
1845 size = field->size;
1846 }
1847
1848 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1849}
1850
1851bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1852{
1853 return fmt->cmp == __sort__hde_cmp;
1854}
1855
1856static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1857{
1858 struct hpp_dynamic_entry *hde_a;
1859 struct hpp_dynamic_entry *hde_b;
1860
1861 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1862 return false;
1863
1864 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1865 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1866
1867 return hde_a->field == hde_b->field;
1868}
1869
1870static void hde_free(struct perf_hpp_fmt *fmt)
1871{
1872 struct hpp_dynamic_entry *hde;
1873
1874 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1875 free(hde);
1876}
1877
1878static struct hpp_dynamic_entry *
1879__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1880 int level)
1881{
1882 struct hpp_dynamic_entry *hde;
1883
1884 hde = malloc(sizeof(*hde));
1885 if (hde == NULL) {
1886 pr_debug("Memory allocation failed\n");
1887 return NULL;
1888 }
1889
1890 hde->evsel = evsel;
1891 hde->field = field;
1892 hde->dynamic_len = 0;
1893
1894 hde->hpp.name = field->name;
1895 hde->hpp.header = __sort__hde_header;
1896 hde->hpp.width = __sort__hde_width;
1897 hde->hpp.entry = __sort__hde_entry;
1898 hde->hpp.color = NULL;
1899
1900 hde->hpp.cmp = __sort__hde_cmp;
1901 hde->hpp.collapse = __sort__hde_cmp;
1902 hde->hpp.sort = __sort__hde_cmp;
1903 hde->hpp.equal = __sort__hde_equal;
1904 hde->hpp.free = hde_free;
1905
1906 INIT_LIST_HEAD(&hde->hpp.list);
1907 INIT_LIST_HEAD(&hde->hpp.sort_list);
1908 hde->hpp.elide = false;
1909 hde->hpp.len = 0;
1910 hde->hpp.user_len = 0;
1911 hde->hpp.level = level;
1912
1913 return hde;
1914}
1915
1916struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
1917{
1918 struct perf_hpp_fmt *new_fmt = NULL;
1919
1920 if (perf_hpp__is_sort_entry(fmt)) {
1921 struct hpp_sort_entry *hse, *new_hse;
1922
1923 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1924 new_hse = memdup(hse, sizeof(*hse));
1925 if (new_hse)
1926 new_fmt = &new_hse->hpp;
1927 } else if (perf_hpp__is_dynamic_entry(fmt)) {
1928 struct hpp_dynamic_entry *hde, *new_hde;
1929
1930 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1931 new_hde = memdup(hde, sizeof(*hde));
1932 if (new_hde)
1933 new_fmt = &new_hde->hpp;
1934 } else {
1935 new_fmt = memdup(fmt, sizeof(*fmt));
1936 }
1937
1938 INIT_LIST_HEAD(&new_fmt->list);
1939 INIT_LIST_HEAD(&new_fmt->sort_list);
1940
1941 return new_fmt;
1942}
1943
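/*
 * Split an "<event>.<field>/<option>" token in place. The event and option
 * parts are optional; a token without a '.' is treated as a bare field name.
 */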
1944static int parse_field_name(char *str, char **event, char **field, char **opt)
1945{
1946 char *event_name, *field_name, *opt_name;
1947
1948 event_name = str;
1949 field_name = strchr(str, '.');
1950
1951 if (field_name) {
1952 *field_name++ = '\0';
1953 } else {
1954 event_name = NULL;
1955 field_name = str;
1956 }
1957
1958 opt_name = strchr(field_name, '/');
1959 if (opt_name)
1960 *opt_name++ = '\0';
1961
1962 *event = event_name;
1963 *field = field_name;
1964 *opt = opt_name;
1965
1966 return 0;
1967}
1968
1969/* find a matching evsel using a given event name. The event name can be:
1970 * 1. '%' + event index (e.g. '%1' for first event)
1971 * 2. full event name (e.g. sched:sched_switch)
1972 * 3. partial event name (should not contain ':')
1973 */
1974static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1975{
1976 struct perf_evsel *evsel = NULL;
1977 struct perf_evsel *pos;
1978 bool full_name;
1979
1980 /* case 1 */
1981 if (event_name[0] == '%') {
1982 int nr = strtol(event_name+1, NULL, 0);
1983
1984 if (nr < 1 || nr > evlist->nr_entries)
1985 return NULL;
1986
1987 evsel = perf_evlist__first(evlist);
1988 while (--nr > 0)
1989 evsel = perf_evsel__next(evsel);
1990
1991 return evsel;
1992 }
1993
1994 full_name = !!strchr(event_name, ':');
1995 evlist__for_each(evlist, pos) {
1996 /* case 2 */
1997 if (full_name && !strcmp(pos->name, event_name))
1998 return pos;
1999 /* case 3 */
2000 if (!full_name && strstr(pos->name, event_name)) {
2001 if (evsel) {
2002 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2003 event_name, evsel->name, pos->name);
2004 return NULL;
2005 }
2006 evsel = pos;
2007 }
2008 }
2009
2010 return evsel;
2011}
2012
2013static int __dynamic_dimension__add(struct perf_evsel *evsel,
2014 struct format_field *field,
2015 bool raw_trace, int level)
2016{
2017 struct hpp_dynamic_entry *hde;
2018
2019 hde = __alloc_dynamic_entry(evsel, field, level);
2020 if (hde == NULL)
2021 return -ENOMEM;
2022
2023 hde->raw_trace = raw_trace;
2024
2025 perf_hpp__register_sort_field(&hde->hpp);
2026 return 0;
2027}
2028
2029static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2030{
2031 int ret;
2032 struct format_field *field;
2033
2034 field = evsel->tp_format->format.fields;
2035 while (field) {
2036 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2037 if (ret < 0)
2038 return ret;
2039
2040 field = field->next;
2041 }
2042 return 0;
2043}
2044
2045static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2046 int level)
2047{
2048 int ret;
2049 struct perf_evsel *evsel;
2050
2051 evlist__for_each(evlist, evsel) {
2052 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2053 continue;
2054
2055 ret = add_evsel_fields(evsel, raw_trace, level);
2056 if (ret < 0)
2057 return ret;
2058 }
2059 return 0;
2060}
2061
2062static int add_all_matching_fields(struct perf_evlist *evlist,
2063 char *field_name, bool raw_trace, int level)
2064{
2065 int ret = -ESRCH;
2066 struct perf_evsel *evsel;
2067 struct format_field *field;
2068
2069 evlist__for_each(evlist, evsel) {
2070 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2071 continue;
2072
2073 field = pevent_find_any_field(evsel->tp_format, field_name);
2074 if (field == NULL)
2075 continue;
2076
2077 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2078 if (ret < 0)
2079 break;
2080 }
2081 return ret;
2082}
2083
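/*
 * Handle a --sort token that refers to a tracepoint field rather than a
 * built-in dimension.  Accepted forms are 'trace_fields' (all fields of
 * all tracepoint events), '<field>' (the field from every event that has
 * it), '<event>.<field>' and '<event>.*' (all fields of one event), where
 * <event> follows the find_evsel() rules above (e.g. '%1' or
 * 'sched:sched_switch').  A '/raw' suffix disables pretty-printing of
 * the field value.
 */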
2084static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2085 int level)
2086{
2087 char *str, *event_name, *field_name, *opt_name;
2088 struct perf_evsel *evsel;
2089 struct format_field *field;
2090 bool raw_trace = symbol_conf.raw_trace;
2091 int ret = 0;
2092
2093 if (evlist == NULL)
2094 return -ENOENT;
2095
2096 str = strdup(tok);
2097 if (str == NULL)
2098 return -ENOMEM;
2099
2100 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2101 ret = -EINVAL;
2102 goto out;
2103 }
2104
2105 if (opt_name) {
2106 if (strcmp(opt_name, "raw")) {
2107 pr_debug("unsupported field option %s\n", opt_name);
2108 ret = -EINVAL;
2109 goto out;
2110 }
2111 raw_trace = true;
2112 }
2113
2114 if (!strcmp(field_name, "trace_fields")) {
2115 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2116 goto out;
2117 }
2118
2119 if (event_name == NULL) {
2120 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2121 goto out;
2122 }
2123
2124 evsel = find_evsel(evlist, event_name);
2125 if (evsel == NULL) {
2126 pr_debug("Cannot find event: %s\n", event_name);
2127 ret = -ENOENT;
2128 goto out;
2129 }
2130
2131 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2132 pr_debug("%s is not a tracepoint event\n", event_name);
2133 ret = -EINVAL;
2134 goto out;
2135 }
2136
2137 if (!strcmp(field_name, "*")) {
2138 ret = add_evsel_fields(evsel, raw_trace, level);
2139 } else {
2140 field = pevent_find_any_field(evsel->tp_format, field_name);
2141 if (field == NULL) {
2142 pr_debug("Cannot find event field for %s.%s\n",
2143 event_name, field_name);
2144			ret = -ENOENT;
			goto out;
2145 }
2146
2147 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2148 }
2149
2150out:
2151 free(str);
2152 return ret;
2153}
2154
2155static int __sort_dimension__add(struct sort_dimension *sd,
2156 struct perf_hpp_list *list,
2157 int level)
2158{
2159 if (sd->taken)
2160 return 0;
2161
2162 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2163 return -1;
2164
2165 if (sd->entry->se_collapse)
2166 sort__need_collapse = 1;
2167
2168 sd->taken = 1;
2169
2170 return 0;
2171}
2172
2173static int __hpp_dimension__add(struct hpp_dimension *hd,
2174 struct perf_hpp_list *list,
2175 int level)
2176{
2177 struct perf_hpp_fmt *fmt;
2178
2179 if (hd->taken)
2180 return 0;
2181
2182 fmt = __hpp_dimension__alloc_hpp(hd, level);
2183 if (!fmt)
2184 return -1;
2185
2186 hd->taken = 1;
2187 perf_hpp_list__register_sort_field(list, fmt);
2188 return 0;
2189}
2190
2191static int __sort_dimension__add_output(struct perf_hpp_list *list,
2192 struct sort_dimension *sd)
2193{
2194 if (sd->taken)
2195 return 0;
2196
2197 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2198 return -1;
2199
2200 sd->taken = 1;
2201 return 0;
2202}
2203
2204static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2205 struct hpp_dimension *hd)
2206{
2207 struct perf_hpp_fmt *fmt;
2208
2209 if (hd->taken)
2210 return 0;
2211
2212 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2213 if (!fmt)
2214 return -1;
2215
2216 hd->taken = 1;
2217 perf_hpp_list__column_register(list, fmt);
2218 return 0;
2219}
2220
2221int hpp_dimension__add_output(unsigned col)
2222{
2223 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2224 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2225}
2226
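/*
 * Resolve a single --sort token: try the common dimensions first, then
 * the hpp (overhead-style) columns, then the branch-stack and memory
 * dimensions (only valid in their respective sort modes), and finally
 * fall back to dynamic tracepoint fields.  A token may be any
 * case-insensitive prefix of a dimension name; the first match wins.
 * Also compiles parent_regex for the 'parent' key and updates the
 * sort__has_* flags.
 */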
2227static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2228 struct perf_evlist *evlist,
2229 int level)
2230{
2231 unsigned int i;
2232
2233 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2234 struct sort_dimension *sd = &common_sort_dimensions[i];
2235
2236 if (strncasecmp(tok, sd->name, strlen(tok)))
2237 continue;
2238
2239 if (sd->entry == &sort_parent) {
2240 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2241 if (ret) {
2242 char err[BUFSIZ];
2243
2244 regerror(ret, &parent_regex, err, sizeof(err));
2245 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2246 return -EINVAL;
2247 }
2248 sort__has_parent = 1;
2249 } else if (sd->entry == &sort_sym) {
2250 sort__has_sym = 1;
2251 /*
2252			 * perf diff displays the performance difference among
2253			 * two or more perf.data files. Those files could come
2254			 * from different binaries, so we should not compare
2255			 * their addresses, but the symbol names.
2256 */
2257 if (sort__mode == SORT_MODE__DIFF)
2258 sd->entry->se_collapse = sort__sym_sort;
2259
2260 } else if (sd->entry == &sort_dso) {
2261 sort__has_dso = 1;
2262 } else if (sd->entry == &sort_socket) {
2263 sort__has_socket = 1;
2264 } else if (sd->entry == &sort_thread) {
2265 sort__has_thread = 1;
2266 } else if (sd->entry == &sort_comm) {
2267 sort__has_comm = 1;
2268 }
2269
2270 return __sort_dimension__add(sd, list, level);
2271 }
2272
2273 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2274 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2275
2276 if (strncasecmp(tok, hd->name, strlen(tok)))
2277 continue;
2278
2279 return __hpp_dimension__add(hd, list, level);
2280 }
2281
2282 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2283 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2284
2285 if (strncasecmp(tok, sd->name, strlen(tok)))
2286 continue;
2287
2288 if (sort__mode != SORT_MODE__BRANCH)
2289 return -EINVAL;
2290
2291 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2292 sort__has_sym = 1;
2293
2294 __sort_dimension__add(sd, list, level);
2295 return 0;
2296 }
2297
2298 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2299 struct sort_dimension *sd = &memory_sort_dimensions[i];
2300
2301 if (strncasecmp(tok, sd->name, strlen(tok)))
2302 continue;
2303
2304 if (sort__mode != SORT_MODE__MEMORY)
2305 return -EINVAL;
2306
2307 if (sd->entry == &sort_mem_daddr_sym)
2308 sort__has_sym = 1;
2309
2310 __sort_dimension__add(sd, list, level);
2311 return 0;
2312 }
2313
2314 if (!add_dynamic_entry(evlist, tok, level))
2315 return 0;
2316
2317 return -ESRCH;
2318}
2319
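/*
 * Split the sort spec on ',' and ' ' and add each key in turn.  Keys
 * wrapped in '{...}' share one hierarchy level while every other key
 * starts a new one, e.g. '{comm,dso},sym' keeps comm and dso on the
 * same level and puts sym on the next.
 */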
2320static int setup_sort_list(struct perf_hpp_list *list, char *str,
2321 struct perf_evlist *evlist)
2322{
2323 char *tmp, *tok;
2324 int ret = 0;
2325 int level = 0;
2326 int next_level = 1;
2327 bool in_group = false;
2328
2329 do {
2330 tok = str;
2331 tmp = strpbrk(str, "{}, ");
2332 if (tmp) {
2333 if (in_group)
2334 next_level = level;
2335 else
2336 next_level = level + 1;
2337
2338 if (*tmp == '{')
2339 in_group = true;
2340 else if (*tmp == '}')
2341 in_group = false;
2342
2343 *tmp = '\0';
2344 str = tmp + 1;
2345 }
2346
2347 if (*tok) {
2348 ret = sort_dimension__add(list, tok, evlist, level);
2349 if (ret == -EINVAL) {
2350 error("Invalid --sort key: `%s'", tok);
2351 break;
2352 } else if (ret == -ESRCH) {
2353 error("Unknown --sort key: `%s'", tok);
2354 break;
2355 }
2356 }
2357
2358 level = next_level;
2359 } while (tmp);
2360
2361 return ret;
2362}
2363
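/*
 * Pick the default sort order for the current sort mode.  When every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * and, if raw trace output was requested, sort on 'trace_fields'.
 */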
2364static const char *get_default_sort_order(struct perf_evlist *evlist)
2365{
2366 const char *default_sort_orders[] = {
2367 default_sort_order,
2368 default_branch_sort_order,
2369 default_mem_sort_order,
2370 default_top_sort_order,
2371 default_diff_sort_order,
2372 default_tracepoint_sort_order,
2373 };
2374 bool use_trace = true;
2375 struct perf_evsel *evsel;
2376
2377 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2378
2379 if (evlist == NULL)
2380 goto out_no_evlist;
2381
2382 evlist__for_each(evlist, evsel) {
2383 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2384 use_trace = false;
2385 break;
2386 }
2387 }
2388
2389 if (use_trace) {
2390 sort__mode = SORT_MODE__TRACEPOINT;
2391 if (symbol_conf.raw_trace)
2392 return "trace_fields";
2393 }
2394out_no_evlist:
2395 return default_sort_orders[sort__mode];
2396}
2397
2398static int setup_sort_order(struct perf_evlist *evlist)
2399{
2400 char *new_sort_order;
2401
2402 /*
2403 * Append '+'-prefixed sort order to the default sort
2404 * order string.
2405 */
2406 if (!sort_order || is_strict_order(sort_order))
2407 return 0;
2408
2409 if (sort_order[1] == '\0') {
2410 error("Invalid --sort key: `+'");
2411 return -EINVAL;
2412 }
2413
2414 /*
2415	 * We allocate a new sort_order string, but we never free it,
2416	 * because it is referenced throughout the rest of the code.
2417 */
2418 if (asprintf(&new_sort_order, "%s,%s",
2419 get_default_sort_order(evlist), sort_order + 1) < 0) {
2420 error("Not enough memory to set up --sort");
2421 return -ENOMEM;
2422 }
2423
2424 sort_order = new_sort_order;
2425 return 0;
2426}
2427
2428/*
2429 * Adds the 'pre,' prefix to 'str' if 'pre' is
2430 * not already part of 'str'.
2431 */
2432static char *prefix_if_not_in(const char *pre, char *str)
2433{
2434 char *n;
2435
2436 if (!str || strstr(str, pre))
2437 return str;
2438
2439 if (asprintf(&n, "%s,%s", pre, str) < 0)
2440 return NULL;
2441
2442 free(str);
2443 return n;
2444}
2445
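/*
 * Make sure the sort keys start with 'overhead' (and 'overhead_children'
 * when callchains are accumulated), except for perf diff which keeps the
 * user-supplied keys untouched.
 */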
2446static char *setup_overhead(char *keys)
2447{
2448 if (sort__mode == SORT_MODE__DIFF)
2449 return keys;
2450
2451 keys = prefix_if_not_in("overhead", keys);
2452
2453 if (symbol_conf.cumulate_callchain)
2454 keys = prefix_if_not_in("overhead_children", keys);
2455
2456 return keys;
2457}
2458
2459static int __setup_sorting(struct perf_evlist *evlist)
2460{
2461 char *str;
2462 const char *sort_keys;
2463 int ret = 0;
2464
2465 ret = setup_sort_order(evlist);
2466 if (ret)
2467 return ret;
2468
2469 sort_keys = sort_order;
2470 if (sort_keys == NULL) {
2471 if (is_strict_order(field_order)) {
2472 /*
2473 * If user specified field order but no sort order,
2474 * we'll honor it and not add default sort orders.
2475 */
2476 return 0;
2477 }
2478
2479 sort_keys = get_default_sort_order(evlist);
2480 }
2481
2482 str = strdup(sort_keys);
2483 if (str == NULL) {
2484 error("Not enough memory to setup sort keys");
2485 return -ENOMEM;
2486 }
2487
2488 /*
2489 * Prepend overhead fields for backward compatibility.
2490 */
2491 if (!is_strict_order(field_order)) {
2492 str = setup_overhead(str);
2493 if (str == NULL) {
2494 error("Not enough memory to setup overhead keys");
2495 return -ENOMEM;
2496 }
2497 }
2498
2499 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2500
2501 free(str);
2502 return ret;
2503}
2504
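/* Force the elide state of the column whose sort entry uses width index 'idx'. */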
2505void perf_hpp__set_elide(int idx, bool elide)
2506{
2507 struct perf_hpp_fmt *fmt;
2508 struct hpp_sort_entry *hse;
2509
2510 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2511 if (!perf_hpp__is_sort_entry(fmt))
2512 continue;
2513
2514 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2515 if (hse->se->se_width_idx == idx) {
2516 fmt->elide = elide;
2517 break;
2518 }
2519 }
2520}
2521
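/*
 * A column may be elided when its filter list holds exactly one entry,
 * since every row would show the same value; the value is then printed
 * once as a '# <name>: <value>' header comment instead.
 */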
2522static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2523{
2524 if (list && strlist__nr_entries(list) == 1) {
2525 if (fp != NULL)
2526 fprintf(fp, "# %s: %s\n", list_name,
2527 strlist__entry(list, 0)->s);
2528 return true;
2529 }
2530 return false;
2531}
2532
2533static bool get_elide(int idx, FILE *output)
2534{
2535 switch (idx) {
2536 case HISTC_SYMBOL:
2537 return __get_elide(symbol_conf.sym_list, "symbol", output);
2538 case HISTC_DSO:
2539 return __get_elide(symbol_conf.dso_list, "dso", output);
2540 case HISTC_COMM:
2541 return __get_elide(symbol_conf.comm_list, "comm", output);
2542 default:
2543 break;
2544 }
2545
2546 if (sort__mode != SORT_MODE__BRANCH)
2547 return false;
2548
2549 switch (idx) {
2550 case HISTC_SYMBOL_FROM:
2551 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2552 case HISTC_SYMBOL_TO:
2553 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2554 case HISTC_DSO_FROM:
2555 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2556 case HISTC_DSO_TO:
2557 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2558 default:
2559 break;
2560 }
2561
2562 return false;
2563}
2564
2565void sort__setup_elide(FILE *output)
2566{
2567 struct perf_hpp_fmt *fmt;
2568 struct hpp_sort_entry *hse;
2569
2570 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2571 if (!perf_hpp__is_sort_entry(fmt))
2572 continue;
2573
2574 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2575 fmt->elide = get_elide(hse->se->se_width_idx, output);
2576 }
2577
2578 /*
2579	 * It makes no sense to elide all of the sort entries.
2580	 * Just revert them so they show up again.
2581 */
2582 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2583 if (!perf_hpp__is_sort_entry(fmt))
2584 continue;
2585
2586 if (!fmt->elide)
2587 return;
2588 }
2589
2590 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2591 if (!perf_hpp__is_sort_entry(fmt))
2592 continue;
2593
2594 fmt->elide = false;
2595 }
2596}
2597
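/*
 * Resolve a single --fields token against the known dimension tables.
 * Unlike --sort, dynamic tracepoint fields are not accepted here.
 */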
2598static int output_field_add(struct perf_hpp_list *list, char *tok)
2599{
2600 unsigned int i;
2601
2602 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2603 struct sort_dimension *sd = &common_sort_dimensions[i];
2604
2605 if (strncasecmp(tok, sd->name, strlen(tok)))
2606 continue;
2607
2608 return __sort_dimension__add_output(list, sd);
2609 }
2610
2611 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2612 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2613
2614 if (strncasecmp(tok, hd->name, strlen(tok)))
2615 continue;
2616
2617 return __hpp_dimension__add_output(list, hd);
2618 }
2619
2620 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2621 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2622
2623 if (strncasecmp(tok, sd->name, strlen(tok)))
2624 continue;
2625
2626 return __sort_dimension__add_output(list, sd);
2627 }
2628
2629 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2630 struct sort_dimension *sd = &memory_sort_dimensions[i];
2631
2632 if (strncasecmp(tok, sd->name, strlen(tok)))
2633 continue;
2634
2635 return __sort_dimension__add_output(list, sd);
2636 }
2637
2638 return -ESRCH;
2639}
2640
2641static int setup_output_list(struct perf_hpp_list *list, char *str)
2642{
2643 char *tmp, *tok;
2644 int ret = 0;
2645
2646 for (tok = strtok_r(str, ", ", &tmp);
2647 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2648 ret = output_field_add(list, tok);
2649 if (ret == -EINVAL) {
2650 error("Invalid --fields key: `%s'", tok);
2651 break;
2652 } else if (ret == -ESRCH) {
2653 error("Unknown --fields key: `%s'", tok);
2654 break;
2655 }
2656 }
2657
2658 return ret;
2659}
2660
2661static void reset_dimensions(void)
2662{
2663 unsigned int i;
2664
2665 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2666 common_sort_dimensions[i].taken = 0;
2667
2668 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2669 hpp_sort_dimensions[i].taken = 0;
2670
2671 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2672 bstack_sort_dimensions[i].taken = 0;
2673
2674 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2675 memory_sort_dimensions[i].taken = 0;
2676}
2677
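/*
 * An order string is "strict" unless it starts with '+', which means the
 * user-supplied keys are appended to the defaults instead of replacing
 * them.
 */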
2678bool is_strict_order(const char *order)
2679{
2680 return order && (*order != '+');
2681}
2682
2683static int __setup_output_field(void)
2684{
2685 char *str, *strp;
2686 int ret = -EINVAL;
2687
2688 if (field_order == NULL)
2689 return 0;
2690
2691 strp = str = strdup(field_order);
2692 if (str == NULL) {
2693 error("Not enough memory to setup output fields");
2694 return -ENOMEM;
2695 }
2696
2697 if (!is_strict_order(field_order))
2698 strp++;
2699
2700 if (!strlen(strp)) {
2701 error("Invalid --fields key: `+'");
2702 goto out;
2703 }
2704
2705 ret = setup_output_list(&perf_hpp_list, strp);
2706
2707out:
2708 free(str);
2709 return ret;
2710}
2711
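/*
 * Main entry point for the sort/output setup: build the sort keys
 * (adding 'parent' when a non-default parent pattern is used), set up
 * the output fields, copy the sort keys into the output fields and vice
 * versa, and finally compute the per-hists format lists.
 */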
2712int setup_sorting(struct perf_evlist *evlist)
2713{
2714 int err;
2715
2716 err = __setup_sorting(evlist);
2717 if (err < 0)
2718 return err;
2719
2720 if (parent_pattern != default_parent_pattern) {
2721 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2722 if (err < 0)
2723 return err;
2724 }
2725
2726 reset_dimensions();
2727
2728 /*
2729 * perf diff doesn't use default hpp output fields.
2730 */
2731 if (sort__mode != SORT_MODE__DIFF)
2732 perf_hpp__init();
2733
2734 err = __setup_output_field();
2735 if (err < 0)
2736 return err;
2737
2738 /* copy sort keys to output fields */
2739 perf_hpp__setup_output_field(&perf_hpp_list);
2740 /* and then copy output fields to sort keys */
2741 perf_hpp__append_sort_keys(&perf_hpp_list);
2742
2743 /* setup hists-specific output fields */
2744 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2745 return -1;
2746
2747 return 0;
2748}
2749
2750void reset_output_field(void)
2751{
2752 sort__need_collapse = 0;
2753 sort__has_parent = 0;
2754 sort__has_sym = 0;
2755 sort__has_dso = 0;
2756
2757 field_order = NULL;
2758 sort_order = NULL;
2759
2760 reset_dimensions();
2761 perf_hpp__reset_output_field(&perf_hpp_list);
2762}