/*
 * tools/perf/util/sort.c
 * From commit: "perf hists: Introduce perf_hpp_list__for_each_format macro"
 */
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9
/* Regex used to match callchain entries against --parent / -p. */
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
/* Default sort-key lists, chosen per sub-command / sampling mode. */
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
/* User-supplied --sort / --fields strings; NULL means "use a default above". */
const char *sort_order;
const char *field_order;
/* Regex and enable flag for --ignore-callees. */
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
/* Flags recording which sort keys are active (set while parsing options). */
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
int sort__has_thread = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
30
31
32 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
33 {
34 int n;
35 va_list ap;
36
37 va_start(ap, fmt);
38 n = vsnprintf(bf, size, fmt, ap);
39 if (symbol_conf.field_sep && n > 0) {
40 char *sep = bf;
41
42 while (1) {
43 sep = strchr(sep, *symbol_conf.field_sep);
44 if (sep == NULL)
45 break;
46 *sep = '.';
47 }
48 }
49 va_end(ap);
50
51 if (n >= (int)size)
52 return size - 1;
53 return n;
54 }
55
56 static int64_t cmp_null(const void *l, const void *r)
57 {
58 if (!l && !r)
59 return 0;
60 else if (!l)
61 return -1;
62 else
63 return 1;
64 }
65
/* --sort pid */

/* Order entries by thread id (operands swapped: higher tid sorts first). */
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

/* Print "  tid:comm"; six columns are consumed by the tid prefix. */
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	/* Clamp first so the comm column width can never underflow. */
	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_width_idx = HISTC_THREAD,
};
90
/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Order by comm string contents (operands swapped, like the others). */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Collapse phase uses the same string ordering as the cmp above. */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

/* Final output ordering: also by comm string. */
static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

/* Left-justified comm name, truncated/padded to the column width. */
static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_width_idx = HISTC_COMM,
};
127
/* --sort dso */

/*
 * Compare two maps by DSO name: the long name under -v, the short name
 * otherwise.  A NULL map/dso sorts via cmp_null().
 */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Operands deliberately swapped to match the other sort__*_cmp's. */
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

/* Print the DSO name, or "[unknown]" when the entry has no map/dso. */
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_width_idx = HISTC_DSO,
};
180
/* --sort symbol */

/*
 * Order two raw addresses.  NOTE(review): the u64 subtraction wraps, so
 * addresses more than 2^63 apart would compare inverted — acceptable for
 * grouping, presumably never hit in practice; confirm if ordering matters.
 */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

/* Order two symbols by start address, then end address; NULLs via cmp_null. */
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	/* Neither side resolved to a symbol: fall back to raw ip. */
	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

/* Output-ordering variant: sort symbols alphabetically by name. */
static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}
231
/*
 * Format "[level] symbol+offset" (plus "address origin" under -v) into bf.
 * Falls back to the raw address when no symbol/map is available.  Always
 * reports @width as the consumed length and truncates bf at that column.
 */
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		/* '!' marks an entry with no map to take a symtab origin from. */
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			/* Data symbols also print the offset into the symbol. */
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
			/*
			 * NOTE(review): width - ret underflows to a huge
			 * unsigned pad width if ret > width; the final
			 * truncation below masks it, but confirm.
			 */
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		/* No symbol: print the raw address, zero-padded to word width. */
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	/* Hard-truncate at the column boundary. */
	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_width_idx = HISTC_SYMBOL,
};
285
286 /* --sort srcline */
287
288 static int64_t
289 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
290 {
291 if (!left->srcline) {
292 if (!left->ms.map)
293 left->srcline = SRCLINE_UNKNOWN;
294 else {
295 struct map *map = left->ms.map;
296 left->srcline = get_srcline(map->dso,
297 map__rip_2objdump(map, left->ip),
298 left->ms.sym, true);
299 }
300 }
301 if (!right->srcline) {
302 if (!right->ms.map)
303 right->srcline = SRCLINE_UNKNOWN;
304 else {
305 struct map *map = right->ms.map;
306 right->srcline = get_srcline(map->dso,
307 map__rip_2objdump(map, right->ip),
308 right->ms.sym, true);
309 }
310 }
311 return strcmp(right->srcline, left->srcline);
312 }
313
/* Print the cached "file:line" string (populated by sort__srcline_cmp). */
static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};
326
/* --sort srcfile */

/* Shared sentinel: an empty string meaning "no source file known". */
static char no_srcfile[1];

/*
 * Resolve the source *file* for an entry by taking the "file:line" from
 * __get_srcline() and chopping at the ':'.  Caller must ensure e->ms.map
 * is non-NULL (map->dso is dereferenced unconditionally).
 */
static char *get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		/* Truncate at ':'; ownership of sf passes to the caller. */
		*p = 0;
		return sf;
	}
	/* Unusable result: release it and fall back to the sentinel. */
	free(sf);
	return no_srcfile;
}

/* Order entries by source file, resolving and caching it on first use. */
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile) {
		if (!left->ms.map)
			left->srcfile = no_srcfile;
		else
			left->srcfile = get_srcfile(left);
	}
	if (!right->srcfile) {
		if (!right->ms.map)
			right->srcfile = no_srcfile;
		else
			right->srcfile = get_srcfile(right);
	}
	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};
379
/* --sort parent */

/* Order by the matched parent symbol's name; NULL parents via cmp_null. */
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

/* Print the parent symbol, or "[other]" when no parent matched. */
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			      he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};
407
/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	/*
	 * NOTE(review): width-3 underflows (huge precision) if width < 3;
	 * presumably the column is always at least the header width — confirm.
	 */
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_width_idx = HISTC_SOCKET,
};
449
/* --sort trace */

/*
 * Render a tracepoint sample's payload to a string using libtraceevent:
 * raw field dump under --raw-trace, pretty-printed event otherwise.
 * Returns the trace_seq buffer; ownership passes to the caller (it is
 * cached in he->trace_output below).
 */
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

/* Order by rendered trace output; non-tracepoint events all compare equal. */
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	/* Column width tracking happens here since output is computed lazily. */
	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};
513
/* sort keys for branch stacks */

/* Order by the DSO of the branch source; entries without branch info via cmp_null. */
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Order by the DSO of the branch target. */
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
555
556 static int64_t
557 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
558 {
559 struct addr_map_symbol *from_l = &left->branch_info->from;
560 struct addr_map_symbol *from_r = &right->branch_info->from;
561
562 if (!left->branch_info || !right->branch_info)
563 return cmp_null(left->branch_info, right->branch_info);
564
565 from_l = &left->branch_info->from;
566 from_r = &right->branch_info->from;
567
568 if (!from_l->sym && !from_r->sym)
569 return _sort__addr_cmp(from_l->addr, from_r->addr);
570
571 return _sort__sym_cmp(from_l->sym, from_r->sym);
572 }
573
574 static int64_t
575 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
576 {
577 struct addr_map_symbol *to_l, *to_r;
578
579 if (!left->branch_info || !right->branch_info)
580 return cmp_null(left->branch_info, right->branch_info);
581
582 to_l = &left->branch_info->to;
583 to_r = &right->branch_info->to;
584
585 if (!to_l->sym && !to_r->sym)
586 return _sort__addr_cmp(to_l->addr, to_r->addr);
587
588 return _sort__sym_cmp(to_l->sym, to_r->sym);
589 }
590
/* Print the branch-source symbol, or "N/A" without branch info. */
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Print the branch-target symbol, or "N/A" without branch info. */
static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
616
/* Column descriptors for the branch-stack sort keys above. */
struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_width_idx = HISTC_SYMBOL_TO,
};
644
/*
 * Group entries whose predicted/mispredicted flags match: returns 0 when
 * both flags agree, 1 otherwise (grouping comparator, not a total order).
 */
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}
657
658 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
659 size_t size, unsigned int width){
660 static const char *out = "N/A";
661
662 if (he->branch_info) {
663 if (he->branch_info->flags.predicted)
664 out = "N";
665 else if (he->branch_info->flags.mispred)
666 out = "Y";
667 }
668
669 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
670 }
671
672 static int64_t
673 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
674 {
675 return left->branch_info->flags.cycles -
676 right->branch_info->flags.cycles;
677 }
678
679 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
680 size_t size, unsigned int width)
681 {
682 if (he->branch_info->flags.cycles == 0)
683 return repsep_snprintf(bf, size, "%-*s", width, "-");
684 return repsep_snprintf(bf, size, "%-*hd", width,
685 he->branch_info->flags.cycles);
686 }
687
/* Column descriptor for the --sort cycles key. */
struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};
694
/* --sort daddr_sym */
/* Order by sampled data address; entries without mem_info sort as address 0. */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

/* Print the data-address symbol via the shared symbol formatter. */
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

/* Order by the instruction address of the memory access. */
static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

/* Order by the DSO containing the data address. */
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}
778
/* Order by the lock attribute of the access; missing mem_info means LOCK_NA. */
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	/* Only mem_lock is read below, so the partial union init is fine. */
	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

/* Print "Yes"/"No"/"N/A" according to the lock bits. */
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
816
/* Order by the raw TLB-access bitmask; missing mem_info means TLB_NA. */
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

/* Names for the PERF_MEM_TLB_* bits, indexed by bit position. */
static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

/*
 * Decode the TLB bitmask into "A or B ... hit/miss" form.  HIT/MISS are
 * stripped first and appended as suffixes.  'l' tracks the used length so
 * strncat() gets the remaining space; the 64-byte buffer comfortably fits
 * every combination of the short names above.
 */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
886
/* Order by the raw memory-level bitmask; missing mem_info means LVL_NA. */
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

/* Names for the PERF_MEM_LVL_* bits, indexed by bit position. */
static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

/* Decode the level bitmask; same structure as hist_entry__tlb_snprintf(). */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
963
/* Order by the raw snoop bitmask; missing mem_info means SNOOP_NA. */
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

/* Names for the PERF_MEM_SNOOP_* bits, indexed by bit position. */
static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

/* Decode the snoop bitmask; same structure as the TLB/level printers. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1021
/*
 * Mask off the low bits to get the cacheline base of @address.
 * NOTE(review): the mask width follows cacheline_size's type — if that
 * global is narrower than u64 the complement is still promoted correctly
 * for power-of-two sizes; confirm cacheline_size is a power of two.
 */
static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}
1027
/*
 * Order entries so that accesses to the same physical cacheline group
 * together: first by cpumode, then by the identity of the backing file
 * (maj/min/ino/generation), then — for anonymous userspace mappings — by
 * pid, and finally by cacheline address.  The exact comparison order is
 * load-bearing; do not reorder.
 */
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
1091
/*
 * Print the cacheline address/symbol; the level char is overridden with
 * 's' for shared file-backed data mappings and 'X' when no map exists.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		     map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};
1126
/* Average sample weight for an entry (0 when it has no events). */
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

/* --sort local_weight: order by per-event average weight. */
static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

/* --sort weight: order by total accumulated weight. */
static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};
1169
/* Column descriptors for the memory-access sort keys. */
struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	/* NOTE(review): shares the DADDR_SYMBOL width slot — confirm intended. */
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};
1225
1226 static int64_t
1227 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1228 {
1229 if (!left->branch_info || !right->branch_info)
1230 return cmp_null(left->branch_info, right->branch_info);
1231
1232 return left->branch_info->flags.abort !=
1233 right->branch_info->flags.abort;
1234 }
1235
1236 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1237 size_t size, unsigned int width)
1238 {
1239 static const char *out = "N/A";
1240
1241 if (he->branch_info) {
1242 if (he->branch_info->flags.abort)
1243 out = "A";
1244 else
1245 out = ".";
1246 }
1247
1248 return repsep_snprintf(bf, size, "%-*s", width, out);
1249 }
1250
/* Branch-stack output field: transaction abort flag of the branch. */
struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};
1257
1258 static int64_t
1259 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1260 {
1261 if (!left->branch_info || !right->branch_info)
1262 return cmp_null(left->branch_info, right->branch_info);
1263
1264 return left->branch_info->flags.in_tx !=
1265 right->branch_info->flags.in_tx;
1266 }
1267
1268 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1269 size_t size, unsigned int width)
1270 {
1271 static const char *out = "N/A";
1272
1273 if (he->branch_info) {
1274 if (he->branch_info->flags.in_tx)
1275 out = "T";
1276 else
1277 out = ".";
1278 }
1279
1280 return repsep_snprintf(bf, size, "%-*s", width, out);
1281 }
1282
/* Branch-stack output field: branch executed inside a transaction. */
struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};
1289
1290 static int64_t
1291 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1292 {
1293 return left->transaction - right->transaction;
1294 }
1295
/*
 * Copy @str (including its NUL) to @p and return the position of the
 * new terminator, so successive calls append left to right.
 */
static inline char *add_str(char *p, const char *str)
{
	size_t n = strlen(str);

	memcpy(p, str, n + 1);
	return p + n;
}
1301
/*
 * Human-readable names for the PERF_TXN_* transaction flag bits.
 * Entries with skip_for_len set are not counted when sizing the
 * "Transaction" column (see hist_entry__transaction_len()).
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};
1317
1318 int hist_entry__transaction_len(void)
1319 {
1320 int i;
1321 int len = 0;
1322
1323 for (i = 0; txbits[i].name; i++) {
1324 if (!txbits[i].skip_for_len)
1325 len += strlen(txbits[i].name);
1326 }
1327 len += 4; /* :XX<space> */
1328 return len;
1329 }
1330
/*
 * Render the transaction word as a space-separated list of flag names,
 * optionally followed by ":<abort code in hex>".  'buf' (128 bytes) is
 * sized for the worst case of all names plus the code suffix.
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	/* a non-zero word should carry SYNC or ASYNC; flag the odd case */
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}
1354
/* Output field: decoded transaction flags of the sample. */
struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
1361
/*
 * Maps a --sort token to the sort_entry implementing it; 'taken'
 * prevents registering the same key twice.
 */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

/* Sort keys that are valid in every sort mode. */
static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM
1387
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

/* Sort keys only valid in branch-stack mode (indices offset from __SORT_BRANCH_STACK). */
static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM
1402
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

/* Sort keys only valid in memory mode (indices offset from __SORT_MEMORY_MODE). */
static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM
1417
/*
 * Maps a --sort/--fields token to a builtin perf_hpp format (overhead,
 * period, ...); 'taken' prevents double registration.
 */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
1438
/* Adapter pairing a generic hpp format with a classic sort_entry. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};
1443
/*
 * Make sure the column tracked for this format is at least as wide as
 * its header text.  Only applies to sort_entry-backed formats.
 */
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
1454
1455 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1456 struct perf_evsel *evsel)
1457 {
1458 struct hpp_sort_entry *hse;
1459 size_t len = fmt->user_len;
1460
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1462
1463 if (!len)
1464 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1465
1466 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1467 }
1468
1469 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1470 struct perf_hpp *hpp __maybe_unused,
1471 struct perf_evsel *evsel)
1472 {
1473 struct hpp_sort_entry *hse;
1474 size_t len = fmt->user_len;
1475
1476 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1477
1478 if (!len)
1479 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1480
1481 return len;
1482 }
1483
1484 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1485 struct hist_entry *he)
1486 {
1487 struct hpp_sort_entry *hse;
1488 size_t len = fmt->user_len;
1489
1490 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1491
1492 if (!len)
1493 len = hists__col_len(he->hists, hse->se->se_width_idx);
1494
1495 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1496 }
1497
1498 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1499 struct hist_entry *a, struct hist_entry *b)
1500 {
1501 struct hpp_sort_entry *hse;
1502
1503 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1504 return hse->se->se_cmp(a, b);
1505 }
1506
1507 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1508 struct hist_entry *a, struct hist_entry *b)
1509 {
1510 struct hpp_sort_entry *hse;
1511 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1512
1513 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1514 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1515 return collapse_fn(a, b);
1516 }
1517
1518 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1519 struct hist_entry *a, struct hist_entry *b)
1520 {
1521 struct hpp_sort_entry *hse;
1522 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1523
1524 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1525 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1526 return sort_fn(a, b);
1527 }
1528
/* A format is a sort entry iff it uses the sort-entry header callback. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
1533
1534 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1535 {
1536 struct hpp_sort_entry *hse_a;
1537 struct hpp_sort_entry *hse_b;
1538
1539 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1540 return false;
1541
1542 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1543 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1544
1545 return hse_a->se == hse_b->se;
1546 }
1547
1548 static void hse_free(struct perf_hpp_fmt *fmt)
1549 {
1550 struct hpp_sort_entry *hse;
1551
1552 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1553 free(hse);
1554 }
1555
/*
 * Allocate an hpp format wrapping the dimension's sort_entry, with all
 * callbacks routed through the __sort__hpp_* adapters.  Returns NULL
 * on allocation failure; the caller owns the result (freed via
 * hpp.free == hse_free).
 */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	/* not linked anywhere yet; registration happens at the call site */
	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;

	return hse;
}
1588
/* perf_hpp_fmt::free callback for memdup'ed builtin formats. */
static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}
1593
1594 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1595 {
1596 struct perf_hpp_fmt *fmt;
1597
1598 fmt = memdup(hd->fmt, sizeof(*fmt));
1599 if (fmt) {
1600 INIT_LIST_HEAD(&fmt->list);
1601 INIT_LIST_HEAD(&fmt->sort_list);
1602 fmt->free = hpp_free;
1603 }
1604
1605 return fmt;
1606 }
1607
1608 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1609 {
1610 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1611
1612 if (hse == NULL)
1613 return -1;
1614
1615 perf_hpp__register_sort_field(&hse->hpp);
1616 return 0;
1617 }
1618
1619 static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
1620 struct sort_dimension *sd)
1621 {
1622 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1623
1624 if (hse == NULL)
1625 return -1;
1626
1627 perf_hpp_list__column_register(list, &hse->hpp);
1628 return 0;
1629 }
1630
/* Sort/output entry built at runtime from a tracepoint format field. */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;	/* widest field value seen so far */
	bool raw_trace;		/* print raw bytes instead of pretty text */
};
1638
1639 static int hde_width(struct hpp_dynamic_entry *hde)
1640 {
1641 if (!hde->hpp.len) {
1642 int len = hde->dynamic_len;
1643 int namelen = strlen(hde->field->name);
1644 int fieldlen = hde->field->size;
1645
1646 if (namelen > len)
1647 len = namelen;
1648
1649 if (!(hde->field->flags & FIELD_IS_STRING)) {
1650 /* length for print hex numbers */
1651 fieldlen = hde->field->size * 2 + 2;
1652 }
1653 if (fieldlen > len)
1654 len = fieldlen;
1655
1656 hde->hpp.len = len;
1657 }
1658 return hde->hpp.len;
1659 }
1660
/*
 * Scan this entry's pretty-printed trace output for our field and
 * record the widest value seen so the column can grow to fit.
 * Assumes the output is space-separated "name<sep>value" tokens —
 * the same layout __sort__hde_entry parses.
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	/* raw mode prints fixed-size hex; nothing to measure */
	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			/* skip the field name and its separator character */
			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
1703
1704 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1705 struct perf_evsel *evsel __maybe_unused)
1706 {
1707 struct hpp_dynamic_entry *hde;
1708 size_t len = fmt->user_len;
1709
1710 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1711
1712 if (!len)
1713 len = hde_width(hde);
1714
1715 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1716 }
1717
1718 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1719 struct perf_hpp *hpp __maybe_unused,
1720 struct perf_evsel *evsel __maybe_unused)
1721 {
1722 struct hpp_dynamic_entry *hde;
1723 size_t len = fmt->user_len;
1724
1725 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1726
1727 if (!len)
1728 len = hde_width(hde);
1729
1730 return len;
1731 }
1732
1733 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1734 {
1735 struct hpp_dynamic_entry *hde;
1736
1737 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1738
1739 return hists_to_evsel(hists) == hde->evsel;
1740 }
1741
/*
 * Print one cell for a dynamic entry.  In pretty mode, pick the
 * "name<sep>value" token matching our field out of the entry's trace
 * output; fall back to raw hex (via libtraceevent) when raw_trace is
 * set or the field was not found in the pretty output.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			/* duplicate just the value so free(str) below is safe */
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		/* seq.buffer is heap-allocated and released by free(str) below */
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}
1800
/*
 * Compare two entries by the raw bytes of the tracepoint field.  For
 * dynamic (variable-size) fields the raw value packs the payload
 * location: offset in the low 16 bits, size in the next 16.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;

		/* fixed-size fields: measure the pretty-printed width instead */
		update_dynamic_len(hde, a);
		update_dynamic_len(hde, b);
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
1831
/* A format is a dynamic entry iff it uses the dynamic compare callback. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}
1836
1837 static void hde_free(struct perf_hpp_fmt *fmt)
1838 {
1839 struct hpp_dynamic_entry *hde;
1840
1841 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1842 free(hde);
1843 }
1844
/*
 * Allocate a dynamic entry for (evsel, field) with all callbacks
 * routed through the __sort__hde_* adapters.  Returns NULL on
 * allocation failure; the caller owns the result (freed via
 * hpp.free == hde_free).
 */
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	/* one comparator serves compare, collapse and output sort */
	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;

	return hde;
}
1879
/*
 * Split "[event.]field[/opt]" in place.  '.' separates the optional
 * event name from the field; '/' introduces an optional field option.
 * Missing parts come back as NULL.  Always returns 0.
 */
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *dot = strchr(str, '.');
	char *slash;

	if (dot != NULL) {
		*dot = '\0';
		*event = str;
		*field = dot + 1;
	} else {
		*event = NULL;
		*field = str;
	}

	slash = strchr(*field, '/');
	if (slash != NULL) {
		*slash = '\0';
		*opt = slash + 1;
	} else {
		*opt = NULL;
	}

	return 0;
}
1904
/* find match evsel using a given event name. The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 * Returns NULL when the index is out of range, the full name matches
 * nothing, or a partial name is ambiguous.
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		/*
		 * NOTE(review): indices are 1-based; "%0" or a
		 * non-numeric suffix parses to 0 and falls through to
		 * return the first evsel — confirm that is intended.
		 */
		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}
1948
1949 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1950 struct format_field *field,
1951 bool raw_trace)
1952 {
1953 struct hpp_dynamic_entry *hde;
1954
1955 hde = __alloc_dynamic_entry(evsel, field);
1956 if (hde == NULL)
1957 return -ENOMEM;
1958
1959 hde->raw_trace = raw_trace;
1960
1961 perf_hpp__register_sort_field(&hde->hpp);
1962 return 0;
1963 }
1964
1965 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1966 {
1967 int ret;
1968 struct format_field *field;
1969
1970 field = evsel->tp_format->format.fields;
1971 while (field) {
1972 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1973 if (ret < 0)
1974 return ret;
1975
1976 field = field->next;
1977 }
1978 return 0;
1979 }
1980
1981 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1982 {
1983 int ret;
1984 struct perf_evsel *evsel;
1985
1986 evlist__for_each(evlist, evsel) {
1987 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1988 continue;
1989
1990 ret = add_evsel_fields(evsel, raw_trace);
1991 if (ret < 0)
1992 return ret;
1993 }
1994 return 0;
1995 }
1996
1997 static int add_all_matching_fields(struct perf_evlist *evlist,
1998 char *field_name, bool raw_trace)
1999 {
2000 int ret = -ESRCH;
2001 struct perf_evsel *evsel;
2002 struct format_field *field;
2003
2004 evlist__for_each(evlist, evsel) {
2005 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2006 continue;
2007
2008 field = pevent_find_any_field(evsel->tp_format, field_name);
2009 if (field == NULL)
2010 continue;
2011
2012 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2013 if (ret < 0)
2014 break;
2015 }
2016 return ret;
2017 }
2018
2019 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
2020 {
2021 char *str, *event_name, *field_name, *opt_name;
2022 struct perf_evsel *evsel;
2023 struct format_field *field;
2024 bool raw_trace = symbol_conf.raw_trace;
2025 int ret = 0;
2026
2027 if (evlist == NULL)
2028 return -ENOENT;
2029
2030 str = strdup(tok);
2031 if (str == NULL)
2032 return -ENOMEM;
2033
2034 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2035 ret = -EINVAL;
2036 goto out;
2037 }
2038
2039 if (opt_name) {
2040 if (strcmp(opt_name, "raw")) {
2041 pr_debug("unsupported field option %s\n", opt_name);
2042 ret = -EINVAL;
2043 goto out;
2044 }
2045 raw_trace = true;
2046 }
2047
2048 if (!strcmp(field_name, "trace_fields")) {
2049 ret = add_all_dynamic_fields(evlist, raw_trace);
2050 goto out;
2051 }
2052
2053 if (event_name == NULL) {
2054 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2055 goto out;
2056 }
2057
2058 evsel = find_evsel(evlist, event_name);
2059 if (evsel == NULL) {
2060 pr_debug("Cannot find event: %s\n", event_name);
2061 ret = -ENOENT;
2062 goto out;
2063 }
2064
2065 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2066 pr_debug("%s is not a tracepoint event\n", event_name);
2067 ret = -EINVAL;
2068 goto out;
2069 }
2070
2071 if (!strcmp(field_name, "*")) {
2072 ret = add_evsel_fields(evsel, raw_trace);
2073 } else {
2074 field = pevent_find_any_field(evsel->tp_format, field_name);
2075 if (field == NULL) {
2076 pr_debug("Cannot find event field for %s.%s\n",
2077 event_name, field_name);
2078 return -ENOENT;
2079 }
2080
2081 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2082 }
2083
2084 out:
2085 free(str);
2086 return ret;
2087 }
2088
/*
 * Register 'sd' as a sort key exactly once; dimensions providing a
 * collapse callback require the collapse pass over the hist tree.
 */
static int __sort_dimension__add(struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd) < 0)
		return -1;

	if (sd->entry->se_collapse)
		sort__need_collapse = 1;

	sd->taken = 1;

	return 0;
}
2104
2105 static int __hpp_dimension__add(struct hpp_dimension *hd)
2106 {
2107 struct perf_hpp_fmt *fmt;
2108
2109 if (hd->taken)
2110 return 0;
2111
2112 fmt = __hpp_dimension__alloc_hpp(hd);
2113 if (!fmt)
2114 return -1;
2115
2116 hd->taken = 1;
2117 perf_hpp__register_sort_field(fmt);
2118 return 0;
2119 }
2120
/* Register 'sd' as an output column on 'list' exactly once. */
static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(list, sd) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}
2133
2134 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2135 struct hpp_dimension *hd)
2136 {
2137 struct perf_hpp_fmt *fmt;
2138
2139 if (hd->taken)
2140 return 0;
2141
2142 fmt = __hpp_dimension__alloc_hpp(hd);
2143 if (!fmt)
2144 return -1;
2145
2146 hd->taken = 1;
2147 perf_hpp_list__column_register(list, fmt);
2148 return 0;
2149 }
2150
/* Append the builtin hpp column 'col' to the global output field list. */
int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
2156
2157 static int sort_dimension__add(const char *tok,
2158 struct perf_evlist *evlist __maybe_unused)
2159 {
2160 unsigned int i;
2161
2162 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2163 struct sort_dimension *sd = &common_sort_dimensions[i];
2164
2165 if (strncasecmp(tok, sd->name, strlen(tok)))
2166 continue;
2167
2168 if (sd->entry == &sort_parent) {
2169 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2170 if (ret) {
2171 char err[BUFSIZ];
2172
2173 regerror(ret, &parent_regex, err, sizeof(err));
2174 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2175 return -EINVAL;
2176 }
2177 sort__has_parent = 1;
2178 } else if (sd->entry == &sort_sym) {
2179 sort__has_sym = 1;
2180 /*
2181 * perf diff displays the performance difference amongst
2182 * two or more perf.data files. Those files could come
2183 * from different binaries. So we should not compare
2184 * their ips, but the name of symbol.
2185 */
2186 if (sort__mode == SORT_MODE__DIFF)
2187 sd->entry->se_collapse = sort__sym_sort;
2188
2189 } else if (sd->entry == &sort_dso) {
2190 sort__has_dso = 1;
2191 } else if (sd->entry == &sort_socket) {
2192 sort__has_socket = 1;
2193 } else if (sd->entry == &sort_thread) {
2194 sort__has_thread = 1;
2195 }
2196
2197 return __sort_dimension__add(sd);
2198 }
2199
2200 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2201 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2202
2203 if (strncasecmp(tok, hd->name, strlen(tok)))
2204 continue;
2205
2206 return __hpp_dimension__add(hd);
2207 }
2208
2209 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2210 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2211
2212 if (strncasecmp(tok, sd->name, strlen(tok)))
2213 continue;
2214
2215 if (sort__mode != SORT_MODE__BRANCH)
2216 return -EINVAL;
2217
2218 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2219 sort__has_sym = 1;
2220
2221 __sort_dimension__add(sd);
2222 return 0;
2223 }
2224
2225 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2226 struct sort_dimension *sd = &memory_sort_dimensions[i];
2227
2228 if (strncasecmp(tok, sd->name, strlen(tok)))
2229 continue;
2230
2231 if (sort__mode != SORT_MODE__MEMORY)
2232 return -EINVAL;
2233
2234 if (sd->entry == &sort_mem_daddr_sym)
2235 sort__has_sym = 1;
2236
2237 __sort_dimension__add(sd);
2238 return 0;
2239 }
2240
2241 if (!add_dynamic_entry(evlist, tok))
2242 return 0;
2243
2244 return -ESRCH;
2245 }
2246
2247 static int setup_sort_list(char *str, struct perf_evlist *evlist)
2248 {
2249 char *tmp, *tok;
2250 int ret = 0;
2251
2252 for (tok = strtok_r(str, ", ", &tmp);
2253 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2254 ret = sort_dimension__add(tok, evlist);
2255 if (ret == -EINVAL) {
2256 error("Invalid --sort key: `%s'", tok);
2257 break;
2258 } else if (ret == -ESRCH) {
2259 error("Unknown --sort key: `%s'", tok);
2260 break;
2261 }
2262 }
2263
2264 return ret;
2265 }
2266
/*
 * Pick the default sort key string for the current sort mode.  When
 * every event in the list is a tracepoint, switch to tracepoint mode
 * first (and honor --raw-trace by sorting on raw trace fields).
 */
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	/* indexed by enum sort_mode; must stay in that order */
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
2300
/*
 * Handle a '+'-prefixed --sort string: replace sort_order with
 * "<default keys>,<user keys>".  A plain (strict) sort string is left
 * untouched.  Returns 0 or a negative errno style value.
 */
static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
2330
2331 /*
2332 * Adds 'pre,' prefix into 'str' is 'pre' is
2333 * not already part of 'str'.
2334 */
2335 static char *prefix_if_not_in(const char *pre, char *str)
2336 {
2337 char *n;
2338
2339 if (!str || strstr(str, pre))
2340 return str;
2341
2342 if (asprintf(&n, "%s,%s", pre, str) < 0)
2343 return NULL;
2344
2345 free(str);
2346 return n;
2347 }
2348
2349 static char *setup_overhead(char *keys)
2350 {
2351 keys = prefix_if_not_in("overhead", keys);
2352
2353 if (symbol_conf.cumulate_callchain)
2354 keys = prefix_if_not_in("overhead_children", keys);
2355
2356 return keys;
2357 }
2358
/*
 * Build the sort key list: apply '+'-append handling, fall back to the
 * mode's default keys, prepend overhead columns for compatibility and
 * register everything.  Returns 0 or a negative errno style value.
 */
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 * NOTE(review): if setup_overhead() fails, the original 'str'
	 * may be leaked inside prefix_if_not_in() — verify.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(str, evlist);

	free(str);
	return ret;
}
2404
2405 void perf_hpp__set_elide(int idx, bool elide)
2406 {
2407 struct perf_hpp_fmt *fmt;
2408 struct hpp_sort_entry *hse;
2409
2410 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2411 if (!perf_hpp__is_sort_entry(fmt))
2412 continue;
2413
2414 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2415 if (hse->se->se_width_idx == idx) {
2416 fmt->elide = elide;
2417 break;
2418 }
2419 }
2420 }
2421
2422 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2423 {
2424 if (list && strlist__nr_entries(list) == 1) {
2425 if (fp != NULL)
2426 fprintf(fp, "# %s: %s\n", list_name,
2427 strlist__entry(list, 0)->s);
2428 return true;
2429 }
2430 return false;
2431 }
2432
2433 static bool get_elide(int idx, FILE *output)
2434 {
2435 switch (idx) {
2436 case HISTC_SYMBOL:
2437 return __get_elide(symbol_conf.sym_list, "symbol", output);
2438 case HISTC_DSO:
2439 return __get_elide(symbol_conf.dso_list, "dso", output);
2440 case HISTC_COMM:
2441 return __get_elide(symbol_conf.comm_list, "comm", output);
2442 default:
2443 break;
2444 }
2445
2446 if (sort__mode != SORT_MODE__BRANCH)
2447 return false;
2448
2449 switch (idx) {
2450 case HISTC_SYMBOL_FROM:
2451 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2452 case HISTC_SYMBOL_TO:
2453 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2454 case HISTC_DSO_FROM:
2455 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2456 case HISTC_DSO_TO:
2457 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2458 default:
2459 break;
2460 }
2461
2462 return false;
2463 }
2464
2465 void sort__setup_elide(FILE *output)
2466 {
2467 struct perf_hpp_fmt *fmt;
2468 struct hpp_sort_entry *hse;
2469
2470 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2471 if (!perf_hpp__is_sort_entry(fmt))
2472 continue;
2473
2474 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2475 fmt->elide = get_elide(hse->se->se_width_idx, output);
2476 }
2477
2478 /*
2479 * It makes no sense to elide all of sort entries.
2480 * Just revert them to show up again.
2481 */
2482 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2483 if (!perf_hpp__is_sort_entry(fmt))
2484 continue;
2485
2486 if (!fmt->elide)
2487 return;
2488 }
2489
2490 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2491 if (!perf_hpp__is_sort_entry(fmt))
2492 continue;
2493
2494 fmt->elide = false;
2495 }
2496 }
2497
2498 static int output_field_add(struct perf_hpp_list *list, char *tok)
2499 {
2500 unsigned int i;
2501
2502 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2503 struct sort_dimension *sd = &common_sort_dimensions[i];
2504
2505 if (strncasecmp(tok, sd->name, strlen(tok)))
2506 continue;
2507
2508 return __sort_dimension__add_output(list, sd);
2509 }
2510
2511 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2512 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2513
2514 if (strncasecmp(tok, hd->name, strlen(tok)))
2515 continue;
2516
2517 return __hpp_dimension__add_output(list, hd);
2518 }
2519
2520 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2521 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2522
2523 if (strncasecmp(tok, sd->name, strlen(tok)))
2524 continue;
2525
2526 return __sort_dimension__add_output(list, sd);
2527 }
2528
2529 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2530 struct sort_dimension *sd = &memory_sort_dimensions[i];
2531
2532 if (strncasecmp(tok, sd->name, strlen(tok)))
2533 continue;
2534
2535 return __sort_dimension__add_output(list, sd);
2536 }
2537
2538 return -ESRCH;
2539 }
2540
/*
 * Split 'str' on commas/spaces and add each token as an output field.
 * Stops at the first invalid or unknown key and returns its error code
 * (0 when every token was accepted).
 */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *save = NULL;
	char *tok = strtok_r(str, ", ", &save);
	int err = 0;

	while (tok != NULL) {
		err = output_field_add(list, tok);
		if (err == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		}
		if (err == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
		tok = strtok_r(NULL, ", ", &save);
	}

	return err;
}
2560
2561 static void reset_dimensions(void)
2562 {
2563 unsigned int i;
2564
2565 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2566 common_sort_dimensions[i].taken = 0;
2567
2568 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2569 hpp_sort_dimensions[i].taken = 0;
2570
2571 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2572 bstack_sort_dimensions[i].taken = 0;
2573
2574 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2575 memory_sort_dimensions[i].taken = 0;
2576 }
2577
/*
 * A "strict" order string does not start with '+'.  A leading '+'
 * means "append these keys to the defaults" rather than "use exactly
 * these keys".  NULL is not strict.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return order[0] != '+';
}
2582
2583 static int __setup_output_field(void)
2584 {
2585 char *str, *strp;
2586 int ret = -EINVAL;
2587
2588 if (field_order == NULL)
2589 return 0;
2590
2591 strp = str = strdup(field_order);
2592 if (str == NULL) {
2593 error("Not enough memory to setup output fields");
2594 return -ENOMEM;
2595 }
2596
2597 if (!is_strict_order(field_order))
2598 strp++;
2599
2600 if (!strlen(strp)) {
2601 error("Invalid --fields key: `+'");
2602 goto out;
2603 }
2604
2605 ret = setup_output_list(&perf_hpp_list, strp);
2606
2607 out:
2608 free(str);
2609 return ret;
2610 }
2611
2612 int setup_sorting(struct perf_evlist *evlist)
2613 {
2614 int err;
2615
2616 err = __setup_sorting(evlist);
2617 if (err < 0)
2618 return err;
2619
2620 if (parent_pattern != default_parent_pattern) {
2621 err = sort_dimension__add("parent", evlist);
2622 if (err < 0)
2623 return err;
2624 }
2625
2626 reset_dimensions();
2627
2628 /*
2629 * perf diff doesn't use default hpp output fields.
2630 */
2631 if (sort__mode != SORT_MODE__DIFF)
2632 perf_hpp__init();
2633
2634 err = __setup_output_field();
2635 if (err < 0)
2636 return err;
2637
2638 /* copy sort keys to output fields */
2639 perf_hpp__setup_output_field();
2640 /* and then copy output fields to sort keys */
2641 perf_hpp__append_sort_keys();
2642
2643 return 0;
2644 }
2645
2646 void reset_output_field(void)
2647 {
2648 sort__need_collapse = 0;
2649 sort__has_parent = 0;
2650 sort__has_sym = 0;
2651 sort__has_dso = 0;
2652
2653 field_order = NULL;
2654 sort_order = NULL;
2655
2656 reset_dimensions();
2657 perf_hpp__reset_output_field();
2658 }
This page took 0.084863 seconds and 6 git commands to generate.