perf hists: Add 'equal' method to perf_hpp_fmt struct
tools/perf/util/sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9
10 regex_t parent_regex;
11 const char default_parent_pattern[] = "^sys_|^do_page_fault";
12 const char *parent_pattern = default_parent_pattern;
13 const char default_sort_order[] = "comm,dso,symbol";
14 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order[] = "dso,symbol";
17 const char default_diff_sort_order[] = "dso,symbol";
18 const char default_tracepoint_sort_order[] = "trace";
19 const char *sort_order;
20 const char *field_order;
21 regex_t ignore_callees_regex;
22 int have_ignore_callees = 0;
23 int sort__need_collapse = 0;
24 int sort__has_parent = 0;
25 int sort__has_sym = 0;
26 int sort__has_dso = 0;
27 int sort__has_socket = 0;
28 int sort__has_thread = 0;
29 enum sort_mode sort__mode = SORT_MODE__NORMAL;
30
31
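/*
 * snprintf that cooperates with --field-separator output: any occurrence
 * of the separator character inside the formatted value is replaced with
 * '.' so the columns stay unambiguous.
 */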
32 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
33 {
34 int n;
35 va_list ap;
36
37 va_start(ap, fmt);
38 n = vsnprintf(bf, size, fmt, ap);
39 if (symbol_conf.field_sep && n > 0) {
40 char *sep = bf;
41
42 while (1) {
43 sep = strchr(sep, *symbol_conf.field_sep);
44 if (sep == NULL)
45 break;
46 *sep = '.';
47 }
48 }
49 va_end(ap);
50
51 if (n >= (int)size)
52 return size - 1;
53 return n;
54 }
55
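/* Compare possibly-NULL keys: both NULL -> 0, only left NULL -> -1, otherwise 1. */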
56 static int64_t cmp_null(const void *l, const void *r)
57 {
58 if (!l && !r)
59 return 0;
60 else if (!l)
61 return -1;
62 else
63 return 1;
64 }
65
66 /* --sort pid */
67
68 static int64_t
69 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
70 {
71 return right->thread->tid - left->thread->tid;
72 }
73
74 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
75 size_t size, unsigned int width)
76 {
77 const char *comm = thread__comm_str(he->thread);
78
79 width = max(7U, width) - 6;
80 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
81 width, width, comm ?: "");
82 }
83
84 struct sort_entry sort_thread = {
85 .se_header = " Pid:Command",
86 .se_cmp = sort__thread_cmp,
87 .se_snprintf = hist_entry__thread_snprintf,
88 .se_width_idx = HISTC_THREAD,
89 };
90
91 /* --sort comm */
92
93 static int64_t
94 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
95 {
96 /* Compare comm strings */
97 return strcmp(comm__str(right->comm), comm__str(left->comm));
98 }
99
100 static int64_t
101 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
102 {
103 /* Compare comm strings */
104 return strcmp(comm__str(right->comm), comm__str(left->comm));
105 }
106
107 static int64_t
108 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
109 {
110 return strcmp(comm__str(right->comm), comm__str(left->comm));
111 }
112
113 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
114 size_t size, unsigned int width)
115 {
116 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
117 }
118
119 struct sort_entry sort_comm = {
120 .se_header = "Command",
121 .se_cmp = sort__comm_cmp,
122 .se_collapse = sort__comm_collapse,
123 .se_sort = sort__comm_sort,
124 .se_snprintf = hist_entry__comm_snprintf,
125 .se_width_idx = HISTC_COMM,
126 };
127
128 /* --sort dso */
129
130 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
131 {
132 struct dso *dso_l = map_l ? map_l->dso : NULL;
133 struct dso *dso_r = map_r ? map_r->dso : NULL;
134 const char *dso_name_l, *dso_name_r;
135
136 if (!dso_l || !dso_r)
137 return cmp_null(dso_r, dso_l);
138
139 if (verbose) {
140 dso_name_l = dso_l->long_name;
141 dso_name_r = dso_r->long_name;
142 } else {
143 dso_name_l = dso_l->short_name;
144 dso_name_r = dso_r->short_name;
145 }
146
147 return strcmp(dso_name_l, dso_name_r);
148 }
149
150 static int64_t
151 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
152 {
153 return _sort__dso_cmp(right->ms.map, left->ms.map);
154 }
155
156 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
157 size_t size, unsigned int width)
158 {
159 if (map && map->dso) {
160 const char *dso_name = !verbose ? map->dso->short_name :
161 map->dso->long_name;
162 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
163 }
164
165 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
166 }
167
168 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
169 size_t size, unsigned int width)
170 {
171 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
172 }
173
174 struct sort_entry sort_dso = {
175 .se_header = "Shared Object",
176 .se_cmp = sort__dso_cmp,
177 .se_snprintf = hist_entry__dso_snprintf,
178 .se_width_idx = HISTC_DSO,
179 };
180
181 /* --sort symbol */
182
183 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
184 {
185 return (int64_t)(right_ip - left_ip);
186 }
187
188 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
189 {
190 if (!sym_l || !sym_r)
191 return cmp_null(sym_l, sym_r);
192
193 if (sym_l == sym_r)
194 return 0;
195
196 if (sym_l->start != sym_r->start)
197 return (int64_t)(sym_r->start - sym_l->start);
198
199 return (int64_t)(sym_r->end - sym_l->end);
200 }
201
202 static int64_t
203 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
204 {
205 int64_t ret;
206
207 if (!left->ms.sym && !right->ms.sym)
208 return _sort__addr_cmp(left->ip, right->ip);
209
210 /*
211 * comparing symbol address alone is not enough since it's a
212 * relative address within a dso.
213 */
214 if (!sort__has_dso) {
215 ret = sort__dso_cmp(left, right);
216 if (ret != 0)
217 return ret;
218 }
219
220 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
221 }
222
223 static int64_t
224 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
225 {
226 if (!left->ms.sym || !right->ms.sym)
227 return cmp_null(left->ms.sym, right->ms.sym);
228
229 return strcmp(right->ms.sym->name, left->ms.sym->name);
230 }
231
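/*
 * Print one symbol column cell: verbose mode prepends the raw address and
 * symtab origin, then comes "[level]" and the symbol name (data symbols
 * also get a +0x offset), or just the address when no symbol is resolved.
 * The result is padded or truncated to the column width.
 */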
232 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
233 u64 ip, char level, char *bf, size_t size,
234 unsigned int width)
235 {
236 size_t ret = 0;
237
238 if (verbose) {
239 char o = map ? dso__symtab_origin(map->dso) : '!';
240 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
241 BITS_PER_LONG / 4 + 2, ip, o);
242 }
243
244 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
245 if (sym && map) {
246 if (map->type == MAP__VARIABLE) {
247 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
248 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
249 ip - map->unmap_ip(map, sym->start));
250 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
251 width - ret, "");
252 } else {
253 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
254 width - ret,
255 sym->name);
256 }
257 } else {
258 size_t len = BITS_PER_LONG / 4;
259 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
260 len, ip);
261 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
262 width - ret, "");
263 }
264
265 if (ret > width)
266 bf[width] = '\0';
267
268 return width;
269 }
270
271 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
272 size_t size, unsigned int width)
273 {
274 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
275 he->level, bf, size, width);
276 }
277
278 struct sort_entry sort_sym = {
279 .se_header = "Symbol",
280 .se_cmp = sort__sym_cmp,
281 .se_sort = sort__sym_sort,
282 .se_snprintf = hist_entry__sym_snprintf,
283 .se_width_idx = HISTC_SYMBOL,
284 };
285
286 /* --sort srcline */
287
288 static int64_t
289 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
290 {
291 if (!left->srcline) {
292 if (!left->ms.map)
293 left->srcline = SRCLINE_UNKNOWN;
294 else {
295 struct map *map = left->ms.map;
296 left->srcline = get_srcline(map->dso,
297 map__rip_2objdump(map, left->ip),
298 left->ms.sym, true);
299 }
300 }
301 if (!right->srcline) {
302 if (!right->ms.map)
303 right->srcline = SRCLINE_UNKNOWN;
304 else {
305 struct map *map = right->ms.map;
306 right->srcline = get_srcline(map->dso,
307 map__rip_2objdump(map, right->ip),
308 right->ms.sym, true);
309 }
310 }
311 return strcmp(right->srcline, left->srcline);
312 }
313
314 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
315 size_t size, unsigned int width)
316 {
317 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
318 }
319
320 struct sort_entry sort_srcline = {
321 .se_header = "Source:Line",
322 .se_cmp = sort__srcline_cmp,
323 .se_snprintf = hist_entry__srcline_snprintf,
324 .se_width_idx = HISTC_SRCLINE,
325 };
326
327 /* --sort srcfile */
328
329 static char no_srcfile[1];
330
331 static char *get_srcfile(struct hist_entry *e)
332 {
333 char *sf, *p;
334 struct map *map = e->ms.map;
335
336 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
337 e->ms.sym, false, true);
338 if (!strcmp(sf, SRCLINE_UNKNOWN))
339 return no_srcfile;
340 p = strchr(sf, ':');
341 if (p && *sf) {
342 *p = 0;
343 return sf;
344 }
345 free(sf);
346 return no_srcfile;
347 }
348
349 static int64_t
350 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
351 {
352 if (!left->srcfile) {
353 if (!left->ms.map)
354 left->srcfile = no_srcfile;
355 else
356 left->srcfile = get_srcfile(left);
357 }
358 if (!right->srcfile) {
359 if (!right->ms.map)
360 right->srcfile = no_srcfile;
361 else
362 right->srcfile = get_srcfile(right);
363 }
364 return strcmp(right->srcfile, left->srcfile);
365 }
366
367 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
368 size_t size, unsigned int width)
369 {
370 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
371 }
372
373 struct sort_entry sort_srcfile = {
374 .se_header = "Source File",
375 .se_cmp = sort__srcfile_cmp,
376 .se_snprintf = hist_entry__srcfile_snprintf,
377 .se_width_idx = HISTC_SRCFILE,
378 };
379
380 /* --sort parent */
381
382 static int64_t
383 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
384 {
385 struct symbol *sym_l = left->parent;
386 struct symbol *sym_r = right->parent;
387
388 if (!sym_l || !sym_r)
389 return cmp_null(sym_l, sym_r);
390
391 return strcmp(sym_r->name, sym_l->name);
392 }
393
394 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
395 size_t size, unsigned int width)
396 {
397 return repsep_snprintf(bf, size, "%-*.*s", width, width,
398 he->parent ? he->parent->name : "[other]");
399 }
400
401 struct sort_entry sort_parent = {
402 .se_header = "Parent symbol",
403 .se_cmp = sort__parent_cmp,
404 .se_snprintf = hist_entry__parent_snprintf,
405 .se_width_idx = HISTC_PARENT,
406 };
407
408 /* --sort cpu */
409
410 static int64_t
411 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
412 {
413 return right->cpu - left->cpu;
414 }
415
416 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
417 size_t size, unsigned int width)
418 {
419 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
420 }
421
422 struct sort_entry sort_cpu = {
423 .se_header = "CPU",
424 .se_cmp = sort__cpu_cmp,
425 .se_snprintf = hist_entry__cpu_snprintf,
426 .se_width_idx = HISTC_CPU,
427 };
428
429 /* --sort socket */
430
431 static int64_t
432 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
433 {
434 return right->socket - left->socket;
435 }
436
437 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
438 size_t size, unsigned int width)
439 {
440 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
441 }
442
443 struct sort_entry sort_socket = {
444 .se_header = "Socket",
445 .se_cmp = sort__socket_cmp,
446 .se_snprintf = hist_entry__socket_snprintf,
447 .se_width_idx = HISTC_SOCKET,
448 };
449
450 /* --sort trace */
451
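/*
 * Render a tracepoint entry's payload, either as a raw field dump or via
 * the event's print format.  The returned buffer comes from the trace_seq
 * and must be freed by the caller.
 */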
452 static char *get_trace_output(struct hist_entry *he)
453 {
454 struct trace_seq seq;
455 struct perf_evsel *evsel;
456 struct pevent_record rec = {
457 .data = he->raw_data,
458 .size = he->raw_size,
459 };
460
461 evsel = hists_to_evsel(he->hists);
462
463 trace_seq_init(&seq);
464 if (symbol_conf.raw_trace) {
465 pevent_print_fields(&seq, he->raw_data, he->raw_size,
466 evsel->tp_format);
467 } else {
468 pevent_event_info(&seq, evsel->tp_format, &rec);
469 }
470 return seq.buffer;
471 }
472
473 static int64_t
474 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
475 {
476 struct perf_evsel *evsel;
477
478 evsel = hists_to_evsel(left->hists);
479 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
480 return 0;
481
482 if (left->trace_output == NULL)
483 left->trace_output = get_trace_output(left);
484 if (right->trace_output == NULL)
485 right->trace_output = get_trace_output(right);
486
487 hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
488 hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
489
490 return strcmp(right->trace_output, left->trace_output);
491 }
492
493 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
494 size_t size, unsigned int width)
495 {
496 struct perf_evsel *evsel;
497
498 evsel = hists_to_evsel(he->hists);
499 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
500 return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
501
502 if (he->trace_output == NULL)
503 he->trace_output = get_trace_output(he);
504 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
505 }
506
507 struct sort_entry sort_trace = {
508 .se_header = "Trace output",
509 .se_cmp = sort__trace_cmp,
510 .se_snprintf = hist_entry__trace_snprintf,
511 .se_width_idx = HISTC_TRACE,
512 };
513
514 /* sort keys for branch stacks */
515
516 static int64_t
517 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
518 {
519 if (!left->branch_info || !right->branch_info)
520 return cmp_null(left->branch_info, right->branch_info);
521
522 return _sort__dso_cmp(left->branch_info->from.map,
523 right->branch_info->from.map);
524 }
525
526 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
527 size_t size, unsigned int width)
528 {
529 if (he->branch_info)
530 return _hist_entry__dso_snprintf(he->branch_info->from.map,
531 bf, size, width);
532 else
533 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
534 }
535
536 static int64_t
537 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
538 {
539 if (!left->branch_info || !right->branch_info)
540 return cmp_null(left->branch_info, right->branch_info);
541
542 return _sort__dso_cmp(left->branch_info->to.map,
543 right->branch_info->to.map);
544 }
545
546 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
547 size_t size, unsigned int width)
548 {
549 if (he->branch_info)
550 return _hist_entry__dso_snprintf(he->branch_info->to.map,
551 bf, size, width);
552 else
553 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
554 }
555
556 static int64_t
557 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
558 {
559 struct addr_map_symbol *from_l, *from_r;
561
562 if (!left->branch_info || !right->branch_info)
563 return cmp_null(left->branch_info, right->branch_info);
564
565 from_l = &left->branch_info->from;
566 from_r = &right->branch_info->from;
567
568 if (!from_l->sym && !from_r->sym)
569 return _sort__addr_cmp(from_l->addr, from_r->addr);
570
571 return _sort__sym_cmp(from_l->sym, from_r->sym);
572 }
573
574 static int64_t
575 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
576 {
577 struct addr_map_symbol *to_l, *to_r;
578
579 if (!left->branch_info || !right->branch_info)
580 return cmp_null(left->branch_info, right->branch_info);
581
582 to_l = &left->branch_info->to;
583 to_r = &right->branch_info->to;
584
585 if (!to_l->sym && !to_r->sym)
586 return _sort__addr_cmp(to_l->addr, to_r->addr);
587
588 return _sort__sym_cmp(to_l->sym, to_r->sym);
589 }
590
591 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
592 size_t size, unsigned int width)
593 {
594 if (he->branch_info) {
595 struct addr_map_symbol *from = &he->branch_info->from;
596
597 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
598 he->level, bf, size, width);
599 }
600
601 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
602 }
603
604 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
605 size_t size, unsigned int width)
606 {
607 if (he->branch_info) {
608 struct addr_map_symbol *to = &he->branch_info->to;
609
610 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
611 he->level, bf, size, width);
612 }
613
614 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
615 }
616
617 struct sort_entry sort_dso_from = {
618 .se_header = "Source Shared Object",
619 .se_cmp = sort__dso_from_cmp,
620 .se_snprintf = hist_entry__dso_from_snprintf,
621 .se_width_idx = HISTC_DSO_FROM,
622 };
623
624 struct sort_entry sort_dso_to = {
625 .se_header = "Target Shared Object",
626 .se_cmp = sort__dso_to_cmp,
627 .se_snprintf = hist_entry__dso_to_snprintf,
628 .se_width_idx = HISTC_DSO_TO,
629 };
630
631 struct sort_entry sort_sym_from = {
632 .se_header = "Source Symbol",
633 .se_cmp = sort__sym_from_cmp,
634 .se_snprintf = hist_entry__sym_from_snprintf,
635 .se_width_idx = HISTC_SYMBOL_FROM,
636 };
637
638 struct sort_entry sort_sym_to = {
639 .se_header = "Target Symbol",
640 .se_cmp = sort__sym_to_cmp,
641 .se_snprintf = hist_entry__sym_to_snprintf,
642 .se_width_idx = HISTC_SYMBOL_TO,
643 };
644
645 static int64_t
646 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
647 {
648 unsigned char mp, p;
649
650 if (!left->branch_info || !right->branch_info)
651 return cmp_null(left->branch_info, right->branch_info);
652
653 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
654 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
655 return mp || p;
656 }
657
658 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
659 size_t size, unsigned int width)
{
660 static const char *out = "N/A";
661
662 if (he->branch_info) {
663 if (he->branch_info->flags.predicted)
664 out = "N";
665 else if (he->branch_info->flags.mispred)
666 out = "Y";
667 }
668
669 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
670 }
671
672 static int64_t
673 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
674 {
675 return left->branch_info->flags.cycles -
676 right->branch_info->flags.cycles;
677 }
678
679 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
680 size_t size, unsigned int width)
681 {
682 if (he->branch_info->flags.cycles == 0)
683 return repsep_snprintf(bf, size, "%-*s", width, "-");
684 return repsep_snprintf(bf, size, "%-*hd", width,
685 he->branch_info->flags.cycles);
686 }
687
688 struct sort_entry sort_cycles = {
689 .se_header = "Basic Block Cycles",
690 .se_cmp = sort__cycles_cmp,
691 .se_snprintf = hist_entry__cycles_snprintf,
692 .se_width_idx = HISTC_CYCLES,
693 };
694
695 /* --sort daddr_sym */
696 static int64_t
697 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
698 {
699 uint64_t l = 0, r = 0;
700
701 if (left->mem_info)
702 l = left->mem_info->daddr.addr;
703 if (right->mem_info)
704 r = right->mem_info->daddr.addr;
705
706 return (int64_t)(r - l);
707 }
708
709 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
710 size_t size, unsigned int width)
711 {
712 uint64_t addr = 0;
713 struct map *map = NULL;
714 struct symbol *sym = NULL;
715
716 if (he->mem_info) {
717 addr = he->mem_info->daddr.addr;
718 map = he->mem_info->daddr.map;
719 sym = he->mem_info->daddr.sym;
720 }
721 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
722 width);
723 }
724
725 static int64_t
726 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
727 {
728 uint64_t l = 0, r = 0;
729
730 if (left->mem_info)
731 l = left->mem_info->iaddr.addr;
732 if (right->mem_info)
733 r = right->mem_info->iaddr.addr;
734
735 return (int64_t)(r - l);
736 }
737
738 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
739 size_t size, unsigned int width)
740 {
741 uint64_t addr = 0;
742 struct map *map = NULL;
743 struct symbol *sym = NULL;
744
745 if (he->mem_info) {
746 addr = he->mem_info->iaddr.addr;
747 map = he->mem_info->iaddr.map;
748 sym = he->mem_info->iaddr.sym;
749 }
750 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
751 width);
752 }
753
754 static int64_t
755 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
756 {
757 struct map *map_l = NULL;
758 struct map *map_r = NULL;
759
760 if (left->mem_info)
761 map_l = left->mem_info->daddr.map;
762 if (right->mem_info)
763 map_r = right->mem_info->daddr.map;
764
765 return _sort__dso_cmp(map_l, map_r);
766 }
767
768 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
769 size_t size, unsigned int width)
770 {
771 struct map *map = NULL;
772
773 if (he->mem_info)
774 map = he->mem_info->daddr.map;
775
776 return _hist_entry__dso_snprintf(map, bf, size, width);
777 }
778
779 static int64_t
780 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
781 {
782 union perf_mem_data_src data_src_l;
783 union perf_mem_data_src data_src_r;
784
785 if (left->mem_info)
786 data_src_l = left->mem_info->data_src;
787 else
788 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
789
790 if (right->mem_info)
791 data_src_r = right->mem_info->data_src;
792 else
793 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
794
795 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
796 }
797
798 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
799 size_t size, unsigned int width)
800 {
801 const char *out;
802 u64 mask = PERF_MEM_LOCK_NA;
803
804 if (he->mem_info)
805 mask = he->mem_info->data_src.mem_lock;
806
807 if (mask & PERF_MEM_LOCK_NA)
808 out = "N/A";
809 else if (mask & PERF_MEM_LOCK_LOCKED)
810 out = "Yes";
811 else
812 out = "No";
813
814 return repsep_snprintf(bf, size, "%-*s", width, out);
815 }
816
817 static int64_t
818 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
819 {
820 union perf_mem_data_src data_src_l;
821 union perf_mem_data_src data_src_r;
822
823 if (left->mem_info)
824 data_src_l = left->mem_info->data_src;
825 else
826 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
827
828 if (right->mem_info)
829 data_src_r = right->mem_info->data_src;
830 else
831 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
832
833 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
834 }
835
836 static const char * const tlb_access[] = {
837 "N/A",
838 "HIT",
839 "MISS",
840 "L1",
841 "L2",
842 "Walker",
843 "Fault",
844 };
845 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
846
847 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
848 size_t size, unsigned int width)
849 {
850 char out[64];
851 size_t sz = sizeof(out) - 1; /* -1 for null termination */
852 size_t l = 0, i;
853 u64 m = PERF_MEM_TLB_NA;
854 u64 hit, miss;
855
856 out[0] = '\0';
857
858 if (he->mem_info)
859 m = he->mem_info->data_src.mem_dtlb;
860
861 hit = m & PERF_MEM_TLB_HIT;
862 miss = m & PERF_MEM_TLB_MISS;
863
864 /* already taken care of */
865 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
866
867 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
868 if (!(m & 0x1))
869 continue;
870 if (l) {
871 strcat(out, " or ");
872 l += 4;
873 }
874 strncat(out, tlb_access[i], sz - l);
875 l += strlen(tlb_access[i]);
876 }
877 if (*out == '\0')
878 strcpy(out, "N/A");
879 if (hit)
880 strncat(out, " hit", sz - l);
881 if (miss)
882 strncat(out, " miss", sz - l);
883
884 return repsep_snprintf(bf, size, "%-*s", width, out);
885 }
886
887 static int64_t
888 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
889 {
890 union perf_mem_data_src data_src_l;
891 union perf_mem_data_src data_src_r;
892
893 if (left->mem_info)
894 data_src_l = left->mem_info->data_src;
895 else
896 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
897
898 if (right->mem_info)
899 data_src_r = right->mem_info->data_src;
900 else
901 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
902
903 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
904 }
905
906 static const char * const mem_lvl[] = {
907 "N/A",
908 "HIT",
909 "MISS",
910 "L1",
911 "LFB",
912 "L2",
913 "L3",
914 "Local RAM",
915 "Remote RAM (1 hop)",
916 "Remote RAM (2 hops)",
917 "Remote Cache (1 hop)",
918 "Remote Cache (2 hops)",
919 "I/O",
920 "Uncached",
921 };
922 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
923
924 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
925 size_t size, unsigned int width)
926 {
927 char out[64];
928 size_t sz = sizeof(out) - 1; /* -1 for null termination */
929 size_t i, l = 0;
930 u64 m = PERF_MEM_LVL_NA;
931 u64 hit, miss;
932
933 if (he->mem_info)
934 m = he->mem_info->data_src.mem_lvl;
935
936 out[0] = '\0';
937
938 hit = m & PERF_MEM_LVL_HIT;
939 miss = m & PERF_MEM_LVL_MISS;
940
941 /* already taken care of */
942 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
943
944 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
945 if (!(m & 0x1))
946 continue;
947 if (l) {
948 strcat(out, " or ");
949 l += 4;
950 }
951 strncat(out, mem_lvl[i], sz - l);
952 l += strlen(mem_lvl[i]);
953 }
954 if (*out == '\0')
955 strcpy(out, "N/A");
956 if (hit)
957 strncat(out, " hit", sz - l);
958 if (miss)
959 strncat(out, " miss", sz - l);
960
961 return repsep_snprintf(bf, size, "%-*s", width, out);
962 }
963
964 static int64_t
965 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
966 {
967 union perf_mem_data_src data_src_l;
968 union perf_mem_data_src data_src_r;
969
970 if (left->mem_info)
971 data_src_l = left->mem_info->data_src;
972 else
973 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
974
975 if (right->mem_info)
976 data_src_r = right->mem_info->data_src;
977 else
978 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
979
980 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
981 }
982
983 static const char * const snoop_access[] = {
984 "N/A",
985 "None",
986 "Miss",
987 "Hit",
988 "HitM",
989 };
990 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
991
992 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
993 size_t size, unsigned int width)
994 {
995 char out[64];
996 size_t sz = sizeof(out) - 1; /* -1 for null termination */
997 size_t i, l = 0;
998 u64 m = PERF_MEM_SNOOP_NA;
999
1000 out[0] = '\0';
1001
1002 if (he->mem_info)
1003 m = he->mem_info->data_src.mem_snoop;
1004
1005 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
1006 if (!(m & 0x1))
1007 continue;
1008 if (l) {
1009 strcat(out, " or ");
1010 l += 4;
1011 }
1012 strncat(out, snoop_access[i], sz - l);
1013 l += strlen(snoop_access[i]);
1014 }
1015
1016 if (*out == '\0')
1017 strcpy(out, "N/A");
1018
1019 return repsep_snprintf(bf, size, "%-*s", width, out);
1020 }
1021
1022 static inline u64 cl_address(u64 address)
1023 {
1024 /* return the cacheline of the address */
1025 return (address & ~(cacheline_size - 1));
1026 }
1027
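/*
 * Order data addresses by cacheline: group by cpumode and by the backing
 * map (maj/min/inode/generation), split anonymous userspace mappings per
 * pid, then compare the cacheline of the resolved address.
 */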
1028 static int64_t
1029 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1030 {
1031 u64 l, r;
1032 struct map *l_map, *r_map;
1033
1034 if (!left->mem_info) return -1;
1035 if (!right->mem_info) return 1;
1036
1037 /* group event types together */
1038 if (left->cpumode > right->cpumode) return -1;
1039 if (left->cpumode < right->cpumode) return 1;
1040
1041 l_map = left->mem_info->daddr.map;
1042 r_map = right->mem_info->daddr.map;
1043
1044 /* if both are NULL, jump to sort on al_addr instead */
1045 if (!l_map && !r_map)
1046 goto addr;
1047
1048 if (!l_map) return -1;
1049 if (!r_map) return 1;
1050
1051 if (l_map->maj > r_map->maj) return -1;
1052 if (l_map->maj < r_map->maj) return 1;
1053
1054 if (l_map->min > r_map->min) return -1;
1055 if (l_map->min < r_map->min) return 1;
1056
1057 if (l_map->ino > r_map->ino) return -1;
1058 if (l_map->ino < r_map->ino) return 1;
1059
1060 if (l_map->ino_generation > r_map->ino_generation) return -1;
1061 if (l_map->ino_generation < r_map->ino_generation) return 1;
1062
1063 /*
1064 * Addresses with no major/minor numbers are assumed to be
1065 * anonymous in userspace. Sort those on pid then address.
1066 *
1067 * The kernel and non-zero major/minor mapped areas are
1068 * assumed to be unity mapped. Sort those on address.
1069 */
1070
1071 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1072 (!(l_map->flags & MAP_SHARED)) &&
1073 !l_map->maj && !l_map->min && !l_map->ino &&
1074 !l_map->ino_generation) {
1075 /* userspace anonymous */
1076
1077 if (left->thread->pid_ > right->thread->pid_) return -1;
1078 if (left->thread->pid_ < right->thread->pid_) return 1;
1079 }
1080
1081 addr:
1082 /* al_addr does all the right addr - start + offset calculations */
1083 l = cl_address(left->mem_info->daddr.al_addr);
1084 r = cl_address(right->mem_info->daddr.al_addr);
1085
1086 if (l > r) return -1;
1087 if (l < r) return 1;
1088
1089 return 0;
1090 }
1091
1092 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1093 size_t size, unsigned int width)
1094 {
1095
1096 uint64_t addr = 0;
1097 struct map *map = NULL;
1098 struct symbol *sym = NULL;
1099 char level = he->level;
1100
1101 if (he->mem_info) {
1102 addr = cl_address(he->mem_info->daddr.al_addr);
1103 map = he->mem_info->daddr.map;
1104 sym = he->mem_info->daddr.sym;
1105
1106 /* print [s] for shared data mmaps */
1107 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1108 map && (map->type == MAP__VARIABLE) &&
1109 (map->flags & MAP_SHARED) &&
1110 (map->maj || map->min || map->ino ||
1111 map->ino_generation))
1112 level = 's';
1113 else if (!map)
1114 level = 'X';
1115 }
1116 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1117 width);
1118 }
1119
1120 struct sort_entry sort_mispredict = {
1121 .se_header = "Branch Mispredicted",
1122 .se_cmp = sort__mispredict_cmp,
1123 .se_snprintf = hist_entry__mispredict_snprintf,
1124 .se_width_idx = HISTC_MISPREDICT,
1125 };
1126
1127 static u64 he_weight(struct hist_entry *he)
1128 {
1129 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1130 }
1131
1132 static int64_t
1133 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1134 {
1135 return he_weight(left) - he_weight(right);
1136 }
1137
1138 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1139 size_t size, unsigned int width)
1140 {
1141 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1142 }
1143
1144 struct sort_entry sort_local_weight = {
1145 .se_header = "Local Weight",
1146 .se_cmp = sort__local_weight_cmp,
1147 .se_snprintf = hist_entry__local_weight_snprintf,
1148 .se_width_idx = HISTC_LOCAL_WEIGHT,
1149 };
1150
1151 static int64_t
1152 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1153 {
1154 return left->stat.weight - right->stat.weight;
1155 }
1156
1157 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1158 size_t size, unsigned int width)
1159 {
1160 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1161 }
1162
1163 struct sort_entry sort_global_weight = {
1164 .se_header = "Weight",
1165 .se_cmp = sort__global_weight_cmp,
1166 .se_snprintf = hist_entry__global_weight_snprintf,
1167 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1168 };
1169
1170 struct sort_entry sort_mem_daddr_sym = {
1171 .se_header = "Data Symbol",
1172 .se_cmp = sort__daddr_cmp,
1173 .se_snprintf = hist_entry__daddr_snprintf,
1174 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1175 };
1176
1177 struct sort_entry sort_mem_iaddr_sym = {
1178 .se_header = "Code Symbol",
1179 .se_cmp = sort__iaddr_cmp,
1180 .se_snprintf = hist_entry__iaddr_snprintf,
1181 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1182 };
1183
1184 struct sort_entry sort_mem_daddr_dso = {
1185 .se_header = "Data Object",
1186 .se_cmp = sort__dso_daddr_cmp,
1187 .se_snprintf = hist_entry__dso_daddr_snprintf,
1188 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1189 };
1190
1191 struct sort_entry sort_mem_locked = {
1192 .se_header = "Locked",
1193 .se_cmp = sort__locked_cmp,
1194 .se_snprintf = hist_entry__locked_snprintf,
1195 .se_width_idx = HISTC_MEM_LOCKED,
1196 };
1197
1198 struct sort_entry sort_mem_tlb = {
1199 .se_header = "TLB access",
1200 .se_cmp = sort__tlb_cmp,
1201 .se_snprintf = hist_entry__tlb_snprintf,
1202 .se_width_idx = HISTC_MEM_TLB,
1203 };
1204
1205 struct sort_entry sort_mem_lvl = {
1206 .se_header = "Memory access",
1207 .se_cmp = sort__lvl_cmp,
1208 .se_snprintf = hist_entry__lvl_snprintf,
1209 .se_width_idx = HISTC_MEM_LVL,
1210 };
1211
1212 struct sort_entry sort_mem_snoop = {
1213 .se_header = "Snoop",
1214 .se_cmp = sort__snoop_cmp,
1215 .se_snprintf = hist_entry__snoop_snprintf,
1216 .se_width_idx = HISTC_MEM_SNOOP,
1217 };
1218
1219 struct sort_entry sort_mem_dcacheline = {
1220 .se_header = "Data Cacheline",
1221 .se_cmp = sort__dcacheline_cmp,
1222 .se_snprintf = hist_entry__dcacheline_snprintf,
1223 .se_width_idx = HISTC_MEM_DCACHELINE,
1224 };
1225
1226 static int64_t
1227 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1228 {
1229 if (!left->branch_info || !right->branch_info)
1230 return cmp_null(left->branch_info, right->branch_info);
1231
1232 return left->branch_info->flags.abort !=
1233 right->branch_info->flags.abort;
1234 }
1235
1236 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1237 size_t size, unsigned int width)
1238 {
1239 static const char *out = "N/A";
1240
1241 if (he->branch_info) {
1242 if (he->branch_info->flags.abort)
1243 out = "A";
1244 else
1245 out = ".";
1246 }
1247
1248 return repsep_snprintf(bf, size, "%-*s", width, out);
1249 }
1250
1251 struct sort_entry sort_abort = {
1252 .se_header = "Transaction abort",
1253 .se_cmp = sort__abort_cmp,
1254 .se_snprintf = hist_entry__abort_snprintf,
1255 .se_width_idx = HISTC_ABORT,
1256 };
1257
1258 static int64_t
1259 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1260 {
1261 if (!left->branch_info || !right->branch_info)
1262 return cmp_null(left->branch_info, right->branch_info);
1263
1264 return left->branch_info->flags.in_tx !=
1265 right->branch_info->flags.in_tx;
1266 }
1267
1268 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1269 size_t size, unsigned int width)
1270 {
1271 static const char *out = "N/A";
1272
1273 if (he->branch_info) {
1274 if (he->branch_info->flags.in_tx)
1275 out = "T";
1276 else
1277 out = ".";
1278 }
1279
1280 return repsep_snprintf(bf, size, "%-*s", width, out);
1281 }
1282
1283 struct sort_entry sort_in_tx = {
1284 .se_header = "Branch in transaction",
1285 .se_cmp = sort__in_tx_cmp,
1286 .se_snprintf = hist_entry__in_tx_snprintf,
1287 .se_width_idx = HISTC_IN_TX,
1288 };
1289
1290 static int64_t
1291 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1292 {
1293 return left->transaction - right->transaction;
1294 }
1295
1296 static inline char *add_str(char *p, const char *str)
1297 {
1298 strcpy(p, str);
1299 return p + strlen(str);
1300 }
1301
1302 static struct txbit {
1303 unsigned flag;
1304 const char *name;
1305 int skip_for_len;
1306 } txbits[] = {
1307 { PERF_TXN_ELISION, "EL ", 0 },
1308 { PERF_TXN_TRANSACTION, "TX ", 1 },
1309 { PERF_TXN_SYNC, "SYNC ", 1 },
1310 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1311 { PERF_TXN_RETRY, "RETRY ", 0 },
1312 { PERF_TXN_CONFLICT, "CON ", 0 },
1313 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1314 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1315 { 0, NULL, 0 }
1316 };
1317
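/*
 * Worst-case width of the Transaction column: the flag names not marked
 * skip_for_len plus ":XX " for an abort code.
 */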
1318 int hist_entry__transaction_len(void)
1319 {
1320 int i;
1321 int len = 0;
1322
1323 for (i = 0; txbits[i].name; i++) {
1324 if (!txbits[i].skip_for_len)
1325 len += strlen(txbits[i].name);
1326 }
1327 len += 4; /* :XX<space> */
1328 return len;
1329 }
1330
1331 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1332 size_t size, unsigned int width)
1333 {
1334 u64 t = he->transaction;
1335 char buf[128];
1336 char *p = buf;
1337 int i;
1338
1339 buf[0] = 0;
1340 for (i = 0; txbits[i].name; i++)
1341 if (txbits[i].flag & t)
1342 p = add_str(p, txbits[i].name);
1343 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1344 p = add_str(p, "NEITHER ");
1345 if (t & PERF_TXN_ABORT_MASK) {
1346 sprintf(p, ":%" PRIx64,
1347 (t & PERF_TXN_ABORT_MASK) >>
1348 PERF_TXN_ABORT_SHIFT);
1349 p += strlen(p);
1350 }
1351
1352 return repsep_snprintf(bf, size, "%-*s", width, buf);
1353 }
1354
1355 struct sort_entry sort_transaction = {
1356 .se_header = "Transaction ",
1357 .se_cmp = sort__transaction_cmp,
1358 .se_snprintf = hist_entry__transaction_snprintf,
1359 .se_width_idx = HISTC_TRANSACTION,
1360 };
1361
1362 struct sort_dimension {
1363 const char *name;
1364 struct sort_entry *entry;
1365 int taken;
1366 };
1367
1368 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1369
1370 static struct sort_dimension common_sort_dimensions[] = {
1371 DIM(SORT_PID, "pid", sort_thread),
1372 DIM(SORT_COMM, "comm", sort_comm),
1373 DIM(SORT_DSO, "dso", sort_dso),
1374 DIM(SORT_SYM, "symbol", sort_sym),
1375 DIM(SORT_PARENT, "parent", sort_parent),
1376 DIM(SORT_CPU, "cpu", sort_cpu),
1377 DIM(SORT_SOCKET, "socket", sort_socket),
1378 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1379 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1380 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1381 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1382 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1383 DIM(SORT_TRACE, "trace", sort_trace),
1384 };
1385
1386 #undef DIM
1387
1388 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1389
1390 static struct sort_dimension bstack_sort_dimensions[] = {
1391 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1392 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1393 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1394 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1395 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1396 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1397 DIM(SORT_ABORT, "abort", sort_abort),
1398 DIM(SORT_CYCLES, "cycles", sort_cycles),
1399 };
1400
1401 #undef DIM
1402
1403 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1404
1405 static struct sort_dimension memory_sort_dimensions[] = {
1406 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1407 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1408 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1409 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1410 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1411 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1412 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1413 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1414 };
1415
1416 #undef DIM
1417
1418 struct hpp_dimension {
1419 const char *name;
1420 struct perf_hpp_fmt *fmt;
1421 int taken;
1422 };
1423
1424 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1425
1426 static struct hpp_dimension hpp_sort_dimensions[] = {
1427 DIM(PERF_HPP__OVERHEAD, "overhead"),
1428 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1429 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1430 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1431 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1432 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1433 DIM(PERF_HPP__SAMPLES, "sample"),
1434 DIM(PERF_HPP__PERIOD, "period"),
1435 };
1436
1437 #undef DIM
1438
1439 struct hpp_sort_entry {
1440 struct perf_hpp_fmt hpp;
1441 struct sort_entry *se;
1442 };
1443
1444 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1445 {
1446 struct hpp_sort_entry *hse;
1447
1448 if (!perf_hpp__is_sort_entry(fmt))
1449 return;
1450
1451 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1452 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1453 }
1454
1455 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1456 struct perf_evsel *evsel)
1457 {
1458 struct hpp_sort_entry *hse;
1459 size_t len = fmt->user_len;
1460
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1462
1463 if (!len)
1464 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1465
1466 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1467 }
1468
1469 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1470 struct perf_hpp *hpp __maybe_unused,
1471 struct perf_evsel *evsel)
1472 {
1473 struct hpp_sort_entry *hse;
1474 size_t len = fmt->user_len;
1475
1476 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1477
1478 if (!len)
1479 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1480
1481 return len;
1482 }
1483
1484 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1485 struct hist_entry *he)
1486 {
1487 struct hpp_sort_entry *hse;
1488 size_t len = fmt->user_len;
1489
1490 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1491
1492 if (!len)
1493 len = hists__col_len(he->hists, hse->se->se_width_idx);
1494
1495 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1496 }
1497
1498 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1499 struct hist_entry *a, struct hist_entry *b)
1500 {
1501 struct hpp_sort_entry *hse;
1502
1503 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1504 return hse->se->se_cmp(a, b);
1505 }
1506
1507 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1508 struct hist_entry *a, struct hist_entry *b)
1509 {
1510 struct hpp_sort_entry *hse;
1511 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1512
1513 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1514 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1515 return collapse_fn(a, b);
1516 }
1517
1518 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1519 struct hist_entry *a, struct hist_entry *b)
1520 {
1521 struct hpp_sort_entry *hse;
1522 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1523
1524 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1525 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1526 return sort_fn(a, b);
1527 }
1528
1529 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1530 {
1531 return format->header == __sort__hpp_header;
1532 }
1533
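/*
 * 'equal' callback for sort-entry formats: two columns are the same when
 * both wrap a sort entry and point at the identical struct sort_entry,
 * which lets callers detect duplicate sort keys.
 */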
1534 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1535 {
1536 struct hpp_sort_entry *hse_a;
1537 struct hpp_sort_entry *hse_b;
1538
1539 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1540 return false;
1541
1542 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1543 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1544
1545 return hse_a->se == hse_b->se;
1546 }
1547
1548 static struct hpp_sort_entry *
1549 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1550 {
1551 struct hpp_sort_entry *hse;
1552
1553 hse = malloc(sizeof(*hse));
1554 if (hse == NULL) {
1555 pr_err("Memory allocation failed\n");
1556 return NULL;
1557 }
1558
1559 hse->se = sd->entry;
1560 hse->hpp.name = sd->entry->se_header;
1561 hse->hpp.header = __sort__hpp_header;
1562 hse->hpp.width = __sort__hpp_width;
1563 hse->hpp.entry = __sort__hpp_entry;
1564 hse->hpp.color = NULL;
1565
1566 hse->hpp.cmp = __sort__hpp_cmp;
1567 hse->hpp.collapse = __sort__hpp_collapse;
1568 hse->hpp.sort = __sort__hpp_sort;
1569 hse->hpp.equal = __sort__hpp_equal;
1570
1571 INIT_LIST_HEAD(&hse->hpp.list);
1572 INIT_LIST_HEAD(&hse->hpp.sort_list);
1573 hse->hpp.elide = false;
1574 hse->hpp.len = 0;
1575 hse->hpp.user_len = 0;
1576
1577 return hse;
1578 }
1579
1580 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1581 {
1582 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1583
1584 if (hse == NULL)
1585 return -1;
1586
1587 perf_hpp__register_sort_field(&hse->hpp);
1588 return 0;
1589 }
1590
1591 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1592 {
1593 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1594
1595 if (hse == NULL)
1596 return -1;
1597
1598 perf_hpp__column_register(&hse->hpp);
1599 return 0;
1600 }
1601
1602 struct hpp_dynamic_entry {
1603 struct perf_hpp_fmt hpp;
1604 struct perf_evsel *evsel;
1605 struct format_field *field;
1606 unsigned dynamic_len;
1607 bool raw_trace;
1608 };
1609
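/*
 * Column width for a dynamic tracepoint field: the widest of the field
 * name, the raw field size (hex-printed for non-strings) and the longest
 * pretty-printed value seen so far (dynamic_len).
 */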
1610 static int hde_width(struct hpp_dynamic_entry *hde)
1611 {
1612 if (!hde->hpp.len) {
1613 int len = hde->dynamic_len;
1614 int namelen = strlen(hde->field->name);
1615 int fieldlen = hde->field->size;
1616
1617 if (namelen > len)
1618 len = namelen;
1619
1620 if (!(hde->field->flags & FIELD_IS_STRING)) {
1621 /* length for print hex numbers */
1622 fieldlen = hde->field->size * 2 + 2;
1623 }
1624 if (fieldlen > len)
1625 len = fieldlen;
1626
1627 hde->hpp.len = len;
1628 }
1629 return hde->hpp.len;
1630 }
1631
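/*
 * Walk the pretty-printed trace output of an entry, find the token that
 * starts with this field's name and remember the width of its value so
 * the column can grow to fit.
 */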
1632 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1633 struct hist_entry *he)
1634 {
1635 char *str, *pos;
1636 struct format_field *field = hde->field;
1637 size_t namelen;
1638 bool last = false;
1639
1640 if (hde->raw_trace)
1641 return;
1642
1643 /* parse pretty print result and update max length */
1644 if (!he->trace_output)
1645 he->trace_output = get_trace_output(he);
1646
1647 namelen = strlen(field->name);
1648 str = he->trace_output;
1649
1650 while (str) {
1651 pos = strchr(str, ' ');
1652 if (pos == NULL) {
1653 last = true;
1654 pos = str + strlen(str);
1655 }
1656
1657 if (!strncmp(str, field->name, namelen)) {
1658 size_t len;
1659
1660 str += namelen + 1;
1661 len = pos - str;
1662
1663 if (len > hde->dynamic_len)
1664 hde->dynamic_len = len;
1665 break;
1666 }
1667
1668 if (last)
1669 str = NULL;
1670 else
1671 str = pos + 1;
1672 }
1673 }
1674
1675 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1676 struct perf_evsel *evsel __maybe_unused)
1677 {
1678 struct hpp_dynamic_entry *hde;
1679 size_t len = fmt->user_len;
1680
1681 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1682
1683 if (!len)
1684 len = hde_width(hde);
1685
1686 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1687 }
1688
1689 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1690 struct perf_hpp *hpp __maybe_unused,
1691 struct perf_evsel *evsel __maybe_unused)
1692 {
1693 struct hpp_dynamic_entry *hde;
1694 size_t len = fmt->user_len;
1695
1696 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1697
1698 if (!len)
1699 len = hde_width(hde);
1700
1701 return len;
1702 }
1703
1704 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1705 {
1706 struct hpp_dynamic_entry *hde;
1707
1708 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1709
1710 return hists_to_evsel(hists) == hde->evsel;
1711 }
1712
1713 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1714 struct hist_entry *he)
1715 {
1716 struct hpp_dynamic_entry *hde;
1717 size_t len = fmt->user_len;
1718 char *str, *pos;
1719 struct format_field *field;
1720 size_t namelen;
1721 bool last = false;
1722 int ret;
1723
1724 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1725
1726 if (!len)
1727 len = hde_width(hde);
1728
1729 if (hde->raw_trace)
1730 goto raw_field;
1731
1732 field = hde->field;
1733 namelen = strlen(field->name);
1734 str = he->trace_output;
1735
1736 while (str) {
1737 pos = strchr(str, ' ');
1738 if (pos == NULL) {
1739 last = true;
1740 pos = str + strlen(str);
1741 }
1742
1743 if (!strncmp(str, field->name, namelen)) {
1744 str += namelen + 1;
1745 str = strndup(str, pos - str);
1746
1747 if (str == NULL)
1748 return scnprintf(hpp->buf, hpp->size,
1749 "%*.*s", len, len, "ERROR");
1750 break;
1751 }
1752
1753 if (last)
1754 str = NULL;
1755 else
1756 str = pos + 1;
1757 }
1758
1759 if (str == NULL) {
1760 struct trace_seq seq;
1761 raw_field:
1762 trace_seq_init(&seq);
1763 pevent_print_field(&seq, he->raw_data, hde->field);
1764 str = seq.buffer;
1765 }
1766
1767 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1768 free(str);
1769 return ret;
1770 }
1771
1772 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1773 struct hist_entry *a, struct hist_entry *b)
1774 {
1775 struct hpp_dynamic_entry *hde;
1776 struct format_field *field;
1777 unsigned offset, size;
1778
1779 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1780
1781 field = hde->field;
1782 if (field->flags & FIELD_IS_DYNAMIC) {
1783 unsigned long long dyn;
1784
1785 pevent_read_number_field(field, a->raw_data, &dyn);
1786 offset = dyn & 0xffff;
1787 size = (dyn >> 16) & 0xffff;
1788
1789 /* record max width for output */
1790 if (size > hde->dynamic_len)
1791 hde->dynamic_len = size;
1792 } else {
1793 offset = field->offset;
1794 size = field->size;
1795
1796 update_dynamic_len(hde, a);
1797 update_dynamic_len(hde, b);
1798 }
1799
1800 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1801 }
1802
1803 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1804 {
1805 return fmt->cmp == __sort__hde_cmp;
1806 }
1807
1808 static struct hpp_dynamic_entry *
1809 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1810 {
1811 struct hpp_dynamic_entry *hde;
1812
1813 hde = malloc(sizeof(*hde));
1814 if (hde == NULL) {
1815 pr_debug("Memory allocation failed\n");
1816 return NULL;
1817 }
1818
1819 hde->evsel = evsel;
1820 hde->field = field;
1821 hde->dynamic_len = 0;
1822
1823 hde->hpp.name = field->name;
1824 hde->hpp.header = __sort__hde_header;
1825 hde->hpp.width = __sort__hde_width;
1826 hde->hpp.entry = __sort__hde_entry;
1827 hde->hpp.color = NULL;
1828
1829 hde->hpp.cmp = __sort__hde_cmp;
1830 hde->hpp.collapse = __sort__hde_cmp;
1831 hde->hpp.sort = __sort__hde_cmp;
1832
1833 INIT_LIST_HEAD(&hde->hpp.list);
1834 INIT_LIST_HEAD(&hde->hpp.sort_list);
1835 hde->hpp.elide = false;
1836 hde->hpp.len = 0;
1837 hde->hpp.user_len = 0;
1838
1839 return hde;
1840 }
1841
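/*
 * Split a sort token of the form "[event.]field[/option]" in place;
 * *event and *opt come back NULL when the corresponding part is absent.
 */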
1842 static int parse_field_name(char *str, char **event, char **field, char **opt)
1843 {
1844 char *event_name, *field_name, *opt_name;
1845
1846 event_name = str;
1847 field_name = strchr(str, '.');
1848
1849 if (field_name) {
1850 *field_name++ = '\0';
1851 } else {
1852 event_name = NULL;
1853 field_name = str;
1854 }
1855
1856 opt_name = strchr(field_name, '/');
1857 if (opt_name)
1858 *opt_name++ = '\0';
1859
1860 *event = event_name;
1861 *field = field_name;
1862 *opt = opt_name;
1863
1864 return 0;
1865 }
1866
1867 /* Find the matching evsel for a given event name. The event name can be:
1868 * 1. '%' + event index (e.g. '%1' for first event)
1869 * 2. full event name (e.g. sched:sched_switch)
1870 * 3. partial event name (should not contain ':')
1871 */
1872 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1873 {
1874 struct perf_evsel *evsel = NULL;
1875 struct perf_evsel *pos;
1876 bool full_name;
1877
1878 /* case 1 */
1879 if (event_name[0] == '%') {
1880 int nr = strtol(event_name+1, NULL, 0);
1881
1882 if (nr > evlist->nr_entries)
1883 return NULL;
1884
1885 evsel = perf_evlist__first(evlist);
1886 while (--nr > 0)
1887 evsel = perf_evsel__next(evsel);
1888
1889 return evsel;
1890 }
1891
1892 full_name = !!strchr(event_name, ':');
1893 evlist__for_each(evlist, pos) {
1894 /* case 2 */
1895 if (full_name && !strcmp(pos->name, event_name))
1896 return pos;
1897 /* case 3 */
1898 if (!full_name && strstr(pos->name, event_name)) {
1899 if (evsel) {
1900 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1901 event_name, evsel->name, pos->name);
1902 return NULL;
1903 }
1904 evsel = pos;
1905 }
1906 }
1907
1908 return evsel;
1909 }
1910
1911 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1912 struct format_field *field,
1913 bool raw_trace)
1914 {
1915 struct hpp_dynamic_entry *hde;
1916
1917 hde = __alloc_dynamic_entry(evsel, field);
1918 if (hde == NULL)
1919 return -ENOMEM;
1920
1921 hde->raw_trace = raw_trace;
1922
1923 perf_hpp__register_sort_field(&hde->hpp);
1924 return 0;
1925 }
1926
1927 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1928 {
1929 int ret;
1930 struct format_field *field;
1931
1932 field = evsel->tp_format->format.fields;
1933 while (field) {
1934 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1935 if (ret < 0)
1936 return ret;
1937
1938 field = field->next;
1939 }
1940 return 0;
1941 }
1942
1943 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1944 {
1945 int ret;
1946 struct perf_evsel *evsel;
1947
1948 evlist__for_each(evlist, evsel) {
1949 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1950 continue;
1951
1952 ret = add_evsel_fields(evsel, raw_trace);
1953 if (ret < 0)
1954 return ret;
1955 }
1956 return 0;
1957 }
1958
1959 static int add_all_matching_fields(struct perf_evlist *evlist,
1960 char *field_name, bool raw_trace)
1961 {
1962 int ret = -ESRCH;
1963 struct perf_evsel *evsel;
1964 struct format_field *field;
1965
1966 evlist__for_each(evlist, evsel) {
1967 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1968 continue;
1969
1970 field = pevent_find_any_field(evsel->tp_format, field_name);
1971 if (field == NULL)
1972 continue;
1973
1974 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1975 if (ret < 0)
1976 break;
1977 }
1978 return ret;
1979 }
1980
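/*
 * Add dynamic sort key(s) for a tracepoint field token:
 *   "trace_fields"      - every field of every tracepoint event
 *   "<field>"           - that field in any event providing it
 *   "<event>.<field>"   - one field of one event ("*" selects all fields)
 * An optional "/raw" suffix forces raw (unformatted) output.
 */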
1981 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1982 {
1983 char *str, *event_name, *field_name, *opt_name;
1984 struct perf_evsel *evsel;
1985 struct format_field *field;
1986 bool raw_trace = symbol_conf.raw_trace;
1987 int ret = 0;
1988
1989 if (evlist == NULL)
1990 return -ENOENT;
1991
1992 str = strdup(tok);
1993 if (str == NULL)
1994 return -ENOMEM;
1995
1996 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
1997 ret = -EINVAL;
1998 goto out;
1999 }
2000
2001 if (opt_name) {
2002 if (strcmp(opt_name, "raw")) {
2003 pr_debug("unsupported field option %s\n", opt_name);
2004 ret = -EINVAL;
2005 goto out;
2006 }
2007 raw_trace = true;
2008 }
2009
2010 if (!strcmp(field_name, "trace_fields")) {
2011 ret = add_all_dynamic_fields(evlist, raw_trace);
2012 goto out;
2013 }
2014
2015 if (event_name == NULL) {
2016 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2017 goto out;
2018 }
2019
2020 evsel = find_evsel(evlist, event_name);
2021 if (evsel == NULL) {
2022 pr_debug("Cannot find event: %s\n", event_name);
2023 ret = -ENOENT;
2024 goto out;
2025 }
2026
2027 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2028 pr_debug("%s is not a tracepoint event\n", event_name);
2029 ret = -EINVAL;
2030 goto out;
2031 }
2032
2033 if (!strcmp(field_name, "*")) {
2034 ret = add_evsel_fields(evsel, raw_trace);
2035 } else {
2036 field = pevent_find_any_field(evsel->tp_format, field_name);
2037 if (field == NULL) {
2038 pr_debug("Cannot find event field for %s.%s\n",
2039 event_name, field_name);
2040 ret = -ENOENT;
goto out;
2041 }
2042
2043 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2044 }
2045
2046 out:
2047 free(str);
2048 return ret;
2049 }
2050
2051 static int __sort_dimension__add(struct sort_dimension *sd)
2052 {
2053 if (sd->taken)
2054 return 0;
2055
2056 if (__sort_dimension__add_hpp_sort(sd) < 0)
2057 return -1;
2058
2059 if (sd->entry->se_collapse)
2060 sort__need_collapse = 1;
2061
2062 sd->taken = 1;
2063
2064 return 0;
2065 }
2066
2067 static int __hpp_dimension__add(struct hpp_dimension *hd)
2068 {
2069 if (!hd->taken) {
2070 hd->taken = 1;
2071
2072 perf_hpp__register_sort_field(hd->fmt);
2073 }
2074 return 0;
2075 }
2076
2077 static int __sort_dimension__add_output(struct sort_dimension *sd)
2078 {
2079 if (sd->taken)
2080 return 0;
2081
2082 if (__sort_dimension__add_hpp_output(sd) < 0)
2083 return -1;
2084
2085 sd->taken = 1;
2086 return 0;
2087 }
2088
2089 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2090 {
2091 if (!hd->taken) {
2092 hd->taken = 1;
2093
2094 perf_hpp__column_register(hd->fmt);
2095 }
2096 return 0;
2097 }
2098
2099 int hpp_dimension__add_output(unsigned col)
2100 {
2101 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2102 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2103 }
2104
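/*
 * Resolve one sort token: try the common keys, the hpp overhead columns,
 * the branch-stack keys and the memory keys in that order, then fall back
 * to dynamic tracepoint fields.  Mode-specific keys are rejected outside
 * their sort mode.
 */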
2105 static int sort_dimension__add(const char *tok,
2106 struct perf_evlist *evlist __maybe_unused)
2107 {
2108 unsigned int i;
2109
2110 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2111 struct sort_dimension *sd = &common_sort_dimensions[i];
2112
2113 if (strncasecmp(tok, sd->name, strlen(tok)))
2114 continue;
2115
2116 if (sd->entry == &sort_parent) {
2117 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2118 if (ret) {
2119 char err[BUFSIZ];
2120
2121 regerror(ret, &parent_regex, err, sizeof(err));
2122 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2123 return -EINVAL;
2124 }
2125 sort__has_parent = 1;
2126 } else if (sd->entry == &sort_sym) {
2127 sort__has_sym = 1;
2128 /*
2129 			 * perf diff displays the performance difference between
2130 			 * two or more perf.data files.  Those files could come
2131 			 * from different binaries, so we should not compare
2132 			 * their IPs, but the symbol names.
2133 */
2134 if (sort__mode == SORT_MODE__DIFF)
2135 sd->entry->se_collapse = sort__sym_sort;
2136
2137 } else if (sd->entry == &sort_dso) {
2138 sort__has_dso = 1;
2139 } else if (sd->entry == &sort_socket) {
2140 sort__has_socket = 1;
2141 } else if (sd->entry == &sort_thread) {
2142 sort__has_thread = 1;
2143 }
2144
2145 return __sort_dimension__add(sd);
2146 }
2147
2148 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2149 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2150
2151 if (strncasecmp(tok, hd->name, strlen(tok)))
2152 continue;
2153
2154 return __hpp_dimension__add(hd);
2155 }
2156
2157 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2158 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2159
2160 if (strncasecmp(tok, sd->name, strlen(tok)))
2161 continue;
2162
2163 if (sort__mode != SORT_MODE__BRANCH)
2164 return -EINVAL;
2165
2166 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2167 sort__has_sym = 1;
2168
2169 __sort_dimension__add(sd);
2170 return 0;
2171 }
2172
2173 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2174 struct sort_dimension *sd = &memory_sort_dimensions[i];
2175
2176 if (strncasecmp(tok, sd->name, strlen(tok)))
2177 continue;
2178
2179 if (sort__mode != SORT_MODE__MEMORY)
2180 return -EINVAL;
2181
2182 if (sd->entry == &sort_mem_daddr_sym)
2183 sort__has_sym = 1;
2184
2185 __sort_dimension__add(sd);
2186 return 0;
2187 }
2188
2189 if (!add_dynamic_entry(evlist, tok))
2190 return 0;
2191
2192 return -ESRCH;
2193 }
2194
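/*
 * Pick the default sort order for the current sort mode.  When every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * so the 'trace' output is used by default ("trace_fields" if raw
 * trace output was requested).
 */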
2195 static const char *get_default_sort_order(struct perf_evlist *evlist)
2196 {
2197 const char *default_sort_orders[] = {
2198 default_sort_order,
2199 default_branch_sort_order,
2200 default_mem_sort_order,
2201 default_top_sort_order,
2202 default_diff_sort_order,
2203 default_tracepoint_sort_order,
2204 };
2205 bool use_trace = true;
2206 struct perf_evsel *evsel;
2207
2208 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2209
2210 if (evlist == NULL)
2211 goto out_no_evlist;
2212
2213 evlist__for_each(evlist, evsel) {
2214 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2215 use_trace = false;
2216 break;
2217 }
2218 }
2219
2220 if (use_trace) {
2221 sort__mode = SORT_MODE__TRACEPOINT;
2222 if (symbol_conf.raw_trace)
2223 return "trace_fields";
2224 }
2225 out_no_evlist:
2226 return default_sort_orders[sort__mode];
2227 }
2228
2229 static int setup_sort_order(struct perf_evlist *evlist)
2230 {
2231 char *new_sort_order;
2232
2233 /*
2234 * Append '+'-prefixed sort order to the default sort
2235 * order string.
2236 */
2237 if (!sort_order || is_strict_order(sort_order))
2238 return 0;
2239
2240 if (sort_order[1] == '\0') {
2241 error("Invalid --sort key: `+'");
2242 return -EINVAL;
2243 }
2244
2245 /*
2246 	 * We allocate a new sort_order string, but we never free it,
2247 	 * because it is checked throughout the rest of the code.
2248 */
2249 if (asprintf(&new_sort_order, "%s,%s",
2250 get_default_sort_order(evlist), sort_order + 1) < 0) {
2251 error("Not enough memory to set up --sort");
2252 return -ENOMEM;
2253 }
2254
2255 sort_order = new_sort_order;
2256 return 0;
2257 }
2258
2259 /*
2260  * Adds the 'pre,' prefix to 'str' if 'pre' is
2261  * not already part of 'str'.
2262 */
2263 static char *prefix_if_not_in(const char *pre, char *str)
2264 {
2265 char *n;
2266
2267 if (!str || strstr(str, pre))
2268 return str;
2269
2270 	if (asprintf(&n, "%s,%s", pre, str) < 0) {
		free(str);
		return NULL;
	}
2272
2273 free(str);
2274 return n;
2275 }
2276
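/*
 * Prepend the "overhead" key (and "overhead_children" when callchain
 * cumulation is enabled) unless it is already part of the sort keys.
 */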
2277 static char *setup_overhead(char *keys)
2278 {
2279 keys = prefix_if_not_in("overhead", keys);
2280
2281 if (symbol_conf.cumulate_callchain)
2282 keys = prefix_if_not_in("overhead_children", keys);
2283
2284 return keys;
2285 }
2286
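/*
 * Build the sort keys from --sort (or the mode's default order), add
 * the implicit overhead keys unless a strict --fields order was given,
 * and register each key via sort_dimension__add().
 */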
2287 static int __setup_sorting(struct perf_evlist *evlist)
2288 {
2289 char *tmp, *tok, *str;
2290 const char *sort_keys;
2291 int ret = 0;
2292
2293 ret = setup_sort_order(evlist);
2294 if (ret)
2295 return ret;
2296
2297 sort_keys = sort_order;
2298 if (sort_keys == NULL) {
2299 if (is_strict_order(field_order)) {
2300 /*
2301 * If user specified field order but no sort order,
2302 * we'll honor it and not add default sort orders.
2303 */
2304 return 0;
2305 }
2306
2307 sort_keys = get_default_sort_order(evlist);
2308 }
2309
2310 str = strdup(sort_keys);
2311 if (str == NULL) {
2312 error("Not enough memory to setup sort keys");
2313 return -ENOMEM;
2314 }
2315
2316 /*
2317 * Prepend overhead fields for backward compatibility.
2318 */
2319 if (!is_strict_order(field_order)) {
2320 str = setup_overhead(str);
2321 if (str == NULL) {
2322 error("Not enough memory to setup overhead keys");
2323 return -ENOMEM;
2324 }
2325 }
2326
2327 for (tok = strtok_r(str, ", ", &tmp);
2328 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2329 ret = sort_dimension__add(tok, evlist);
2330 if (ret == -EINVAL) {
2331 error("Invalid --sort key: `%s'", tok);
2332 break;
2333 } else if (ret == -ESRCH) {
2334 error("Unknown --sort key: `%s'", tok);
2335 break;
2336 }
2337 }
2338
2339 free(str);
2340 return ret;
2341 }
2342
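/* Set the elide state of the sort column with the given width index. */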
2343 void perf_hpp__set_elide(int idx, bool elide)
2344 {
2345 struct perf_hpp_fmt *fmt;
2346 struct hpp_sort_entry *hse;
2347
2348 perf_hpp__for_each_format(fmt) {
2349 if (!perf_hpp__is_sort_entry(fmt))
2350 continue;
2351
2352 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2353 if (hse->se->se_width_idx == idx) {
2354 fmt->elide = elide;
2355 break;
2356 }
2357 }
2358 }
2359
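/*
 * A column can be elided when its filter list contains exactly one
 * entry: every hist entry would show the same value, so it is printed
 * once as a header comment instead of on each line.
 */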
2360 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2361 {
2362 if (list && strlist__nr_entries(list) == 1) {
2363 if (fp != NULL)
2364 fprintf(fp, "# %s: %s\n", list_name,
2365 strlist__entry(list, 0)->s);
2366 return true;
2367 }
2368 return false;
2369 }
2370
2371 static bool get_elide(int idx, FILE *output)
2372 {
2373 switch (idx) {
2374 case HISTC_SYMBOL:
2375 return __get_elide(symbol_conf.sym_list, "symbol", output);
2376 case HISTC_DSO:
2377 return __get_elide(symbol_conf.dso_list, "dso", output);
2378 case HISTC_COMM:
2379 return __get_elide(symbol_conf.comm_list, "comm", output);
2380 default:
2381 break;
2382 }
2383
2384 if (sort__mode != SORT_MODE__BRANCH)
2385 return false;
2386
2387 switch (idx) {
2388 case HISTC_SYMBOL_FROM:
2389 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2390 case HISTC_SYMBOL_TO:
2391 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2392 case HISTC_DSO_FROM:
2393 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2394 case HISTC_DSO_TO:
2395 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2396 default:
2397 break;
2398 }
2399
2400 return false;
2401 }
2402
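/*
 * Compute the elide state of every sort column from the corresponding
 * filter lists.
 */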
2403 void sort__setup_elide(FILE *output)
2404 {
2405 struct perf_hpp_fmt *fmt;
2406 struct hpp_sort_entry *hse;
2407
2408 perf_hpp__for_each_format(fmt) {
2409 if (!perf_hpp__is_sort_entry(fmt))
2410 continue;
2411
2412 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2413 fmt->elide = get_elide(hse->se->se_width_idx, output);
2414 }
2415
2416 /*
2417 	 * It makes no sense to elide all of the sort entries.
2418 	 * Just revert them so they show up again.
2419 */
2420 perf_hpp__for_each_format(fmt) {
2421 if (!perf_hpp__is_sort_entry(fmt))
2422 continue;
2423
2424 if (!fmt->elide)
2425 return;
2426 }
2427
2428 perf_hpp__for_each_format(fmt) {
2429 if (!perf_hpp__is_sort_entry(fmt))
2430 continue;
2431
2432 fmt->elide = false;
2433 }
2434 }
2435
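/*
 * Resolve a --fields token against the known dimensions and register
 * it as an output column only; sort keys are handled separately.
 */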
2436 static int output_field_add(char *tok)
2437 {
2438 unsigned int i;
2439
2440 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2441 struct sort_dimension *sd = &common_sort_dimensions[i];
2442
2443 if (strncasecmp(tok, sd->name, strlen(tok)))
2444 continue;
2445
2446 return __sort_dimension__add_output(sd);
2447 }
2448
2449 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2450 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2451
2452 if (strncasecmp(tok, hd->name, strlen(tok)))
2453 continue;
2454
2455 return __hpp_dimension__add_output(hd);
2456 }
2457
2458 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2459 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2460
2461 if (strncasecmp(tok, sd->name, strlen(tok)))
2462 continue;
2463
2464 return __sort_dimension__add_output(sd);
2465 }
2466
2467 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2468 struct sort_dimension *sd = &memory_sort_dimensions[i];
2469
2470 if (strncasecmp(tok, sd->name, strlen(tok)))
2471 continue;
2472
2473 return __sort_dimension__add_output(sd);
2474 }
2475
2476 return -ESRCH;
2477 }
2478
2479 static void reset_dimensions(void)
2480 {
2481 unsigned int i;
2482
2483 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2484 common_sort_dimensions[i].taken = 0;
2485
2486 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2487 hpp_sort_dimensions[i].taken = 0;
2488
2489 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2490 bstack_sort_dimensions[i].taken = 0;
2491
2492 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2493 memory_sort_dimensions[i].taken = 0;
2494 }
2495
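/*
 * A "strict" order string replaces the defaults; a string starting
 * with '+' appends to them instead.
 */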
2496 bool is_strict_order(const char *order)
2497 {
2498 return order && (*order != '+');
2499 }
2500
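/*
 * Parse --fields into output columns.  A leading '+' (append to the
 * default fields) is skipped here; a lone '+' is rejected.
 */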
2501 static int __setup_output_field(void)
2502 {
2503 char *tmp, *tok, *str, *strp;
2504 int ret = -EINVAL;
2505
2506 if (field_order == NULL)
2507 return 0;
2508
2509 strp = str = strdup(field_order);
2510 if (str == NULL) {
2511 error("Not enough memory to setup output fields");
2512 return -ENOMEM;
2513 }
2514
2515 if (!is_strict_order(field_order))
2516 strp++;
2517
2518 if (!strlen(strp)) {
2519 error("Invalid --fields key: `+'");
2520 goto out;
2521 }
2522
2523 for (tok = strtok_r(strp, ", ", &tmp);
2524 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2525 ret = output_field_add(tok);
2526 if (ret == -EINVAL) {
2527 error("Invalid --fields key: `%s'", tok);
2528 break;
2529 } else if (ret == -ESRCH) {
2530 error("Unknown --fields key: `%s'", tok);
2531 break;
2532 }
2533 }
2534
2535 out:
2536 free(str);
2537 return ret;
2538 }
2539
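/*
 * Main entry point for sorting setup: parse the sort keys, add an
 * implicit "parent" key when a non-default parent pattern is in use,
 * register the default output columns (except for perf diff), parse
 * --fields, and finally copy sort keys to output fields and output
 * fields back to sort keys so both lists are complete.
 */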
2540 int setup_sorting(struct perf_evlist *evlist)
2541 {
2542 int err;
2543
2544 err = __setup_sorting(evlist);
2545 if (err < 0)
2546 return err;
2547
2548 if (parent_pattern != default_parent_pattern) {
2549 err = sort_dimension__add("parent", evlist);
2550 if (err < 0)
2551 return err;
2552 }
2553
2554 reset_dimensions();
2555
2556 /*
2557 * perf diff doesn't use default hpp output fields.
2558 */
2559 if (sort__mode != SORT_MODE__DIFF)
2560 perf_hpp__init();
2561
2562 err = __setup_output_field();
2563 if (err < 0)
2564 return err;
2565
2566 /* copy sort keys to output fields */
2567 perf_hpp__setup_output_field();
2568 /* and then copy output fields to sort keys */
2569 perf_hpp__append_sort_keys();
2570
2571 return 0;
2572 }
2573
2574 void reset_output_field(void)
2575 {
2576 sort__need_collapse = 0;
2577 sort__has_parent = 0;
2578 sort__has_sym = 0;
2579 sort__has_dso = 0;
2580
2581 field_order = NULL;
2582 sort_order = NULL;
2583
2584 reset_dimensions();
2585 perf_hpp__reset_output_field();
2586 }