tools/perf/util/sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9 #include "mem-events.h"
10
11 regex_t parent_regex;
12 const char default_parent_pattern[] = "^sys_|^do_page_fault";
13 const char *parent_pattern = default_parent_pattern;
14 const char default_sort_order[] = "comm,dso,symbol";
15 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
16 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
17 const char default_top_sort_order[] = "dso,symbol";
18 const char default_diff_sort_order[] = "dso,symbol";
19 const char default_tracepoint_sort_order[] = "trace";
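/*
 * The default_*_sort_order strings above are indexed by enum sort_mode
 * in get_default_sort_order() below.
 */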
20 const char *sort_order;
21 const char *field_order;
22 regex_t ignore_callees_regex;
23 int have_ignore_callees = 0;
24 int sort__need_collapse = 0;
25 int sort__has_parent = 0;
26 int sort__has_sym = 0;
27 int sort__has_dso = 0;
28 int sort__has_socket = 0;
29 int sort__has_thread = 0;
30 enum sort_mode sort__mode = SORT_MODE__NORMAL;
31
32 /*
33 * Replaces all occurrences of the character used with the:
34 *
35 * -t, --field-separator
36 *
37 * option, which uses a special separator character and doesn't pad with spaces,
38 * replacing all occurrences of this separator in symbol names (and other
39 * output) with a '.' character, which is thus the only invalid separator.
40 */
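/*
 * Illustrative example: with "-t ," a symbol such as "foo<int, long>::bar"
 * is emitted as "foo<int. long>::bar", so the ',' column separator stays
 * unambiguous for scripts parsing the output.
 */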
41 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
42 {
43 int n;
44 va_list ap;
45
46 va_start(ap, fmt);
47 n = vsnprintf(bf, size, fmt, ap);
48 if (symbol_conf.field_sep && n > 0) {
49 char *sep = bf;
50
51 while (1) {
52 sep = strchr(sep, *symbol_conf.field_sep);
53 if (sep == NULL)
54 break;
55 *sep = '.';
56 }
57 }
58 va_end(ap);
59
60 if (n >= (int)size)
61 return size - 1;
62 return n;
63 }
64
65 static int64_t cmp_null(const void *l, const void *r)
66 {
67 if (!l && !r)
68 return 0;
69 else if (!l)
70 return -1;
71 else
72 return 1;
73 }
74
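/*
 * Each sort key below is described by a struct sort_entry (sort.h):
 * ->se_cmp() orders hist entries, ->se_collapse() and ->se_sort() override
 * it for the collapse and output stages when set, ->se_snprintf() formats
 * the column text and ->se_width_idx selects the column-width slot kept
 * per struct hists.
 */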
75 /* --sort pid */
76
77 static int64_t
78 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
79 {
80 return right->thread->tid - left->thread->tid;
81 }
82
83 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
84 size_t size, unsigned int width)
85 {
86 const char *comm = thread__comm_str(he->thread);
87
88 width = max(7U, width) - 6;
89 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
90 width, width, comm ?: "");
91 }
92
93 struct sort_entry sort_thread = {
94 .se_header = " Pid:Command",
95 .se_cmp = sort__thread_cmp,
96 .se_snprintf = hist_entry__thread_snprintf,
97 .se_width_idx = HISTC_THREAD,
98 };
99
100 /* --sort comm */
101
102 static int64_t
103 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
104 {
105 /* Compare the comm strings */
106 return strcmp(comm__str(right->comm), comm__str(left->comm));
107 }
108
109 static int64_t
110 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
111 {
112 /* Compare the comm strings */
113 return strcmp(comm__str(right->comm), comm__str(left->comm));
114 }
115
116 static int64_t
117 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
118 {
119 return strcmp(comm__str(right->comm), comm__str(left->comm));
120 }
121
122 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
123 size_t size, unsigned int width)
124 {
125 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
126 }
127
128 struct sort_entry sort_comm = {
129 .se_header = "Command",
130 .se_cmp = sort__comm_cmp,
131 .se_collapse = sort__comm_collapse,
132 .se_sort = sort__comm_sort,
133 .se_snprintf = hist_entry__comm_snprintf,
134 .se_width_idx = HISTC_COMM,
135 };
136
137 /* --sort dso */
138
139 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
140 {
141 struct dso *dso_l = map_l ? map_l->dso : NULL;
142 struct dso *dso_r = map_r ? map_r->dso : NULL;
143 const char *dso_name_l, *dso_name_r;
144
145 if (!dso_l || !dso_r)
146 return cmp_null(dso_r, dso_l);
147
148 if (verbose) {
149 dso_name_l = dso_l->long_name;
150 dso_name_r = dso_r->long_name;
151 } else {
152 dso_name_l = dso_l->short_name;
153 dso_name_r = dso_r->short_name;
154 }
155
156 return strcmp(dso_name_l, dso_name_r);
157 }
158
159 static int64_t
160 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
161 {
162 return _sort__dso_cmp(right->ms.map, left->ms.map);
163 }
164
165 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
166 size_t size, unsigned int width)
167 {
168 if (map && map->dso) {
169 const char *dso_name = !verbose ? map->dso->short_name :
170 map->dso->long_name;
171 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
172 }
173
174 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
175 }
176
177 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
178 size_t size, unsigned int width)
179 {
180 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
181 }
182
183 struct sort_entry sort_dso = {
184 .se_header = "Shared Object",
185 .se_cmp = sort__dso_cmp,
186 .se_snprintf = hist_entry__dso_snprintf,
187 .se_width_idx = HISTC_DSO,
188 };
189
190 /* --sort symbol */
191
192 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
193 {
194 return (int64_t)(right_ip - left_ip);
195 }
196
197 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
198 {
199 if (!sym_l || !sym_r)
200 return cmp_null(sym_l, sym_r);
201
202 if (sym_l == sym_r)
203 return 0;
204
205 if (sym_l->start != sym_r->start)
206 return (int64_t)(sym_r->start - sym_l->start);
207
208 return (int64_t)(sym_r->end - sym_l->end);
209 }
210
211 static int64_t
212 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
213 {
214 int64_t ret;
215
216 if (!left->ms.sym && !right->ms.sym)
217 return _sort__addr_cmp(left->ip, right->ip);
218
219 /*
220 * comparing symbol address alone is not enough since it's a
221 * relative address within a dso.
222 */
223 if (!sort__has_dso) {
224 ret = sort__dso_cmp(left, right);
225 if (ret != 0)
226 return ret;
227 }
228
229 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
230 }
231
232 static int64_t
233 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
234 {
235 if (!left->ms.sym || !right->ms.sym)
236 return cmp_null(left->ms.sym, right->ms.sym);
237
238 return strcmp(right->ms.sym->name, left->ms.sym->name);
239 }
240
241 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
242 u64 ip, char level, char *bf, size_t size,
243 unsigned int width)
244 {
245 size_t ret = 0;
246
247 if (verbose) {
248 char o = map ? dso__symtab_origin(map->dso) : '!';
249 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
250 BITS_PER_LONG / 4 + 2, ip, o);
251 }
252
253 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
254 if (sym && map) {
255 if (map->type == MAP__VARIABLE) {
256 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
257 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
258 ip - map->unmap_ip(map, sym->start));
259 } else {
260 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
261 width - ret,
262 sym->name);
263 }
264 } else {
265 size_t len = BITS_PER_LONG / 4;
266 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
267 len, ip);
268 }
269
270 return ret;
271 }
272
273 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
274 size_t size, unsigned int width)
275 {
276 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
277 he->level, bf, size, width);
278 }
279
280 struct sort_entry sort_sym = {
281 .se_header = "Symbol",
282 .se_cmp = sort__sym_cmp,
283 .se_sort = sort__sym_sort,
284 .se_snprintf = hist_entry__sym_snprintf,
285 .se_width_idx = HISTC_SYMBOL,
286 };
287
288 /* --sort srcline */
289
290 static char *hist_entry__get_srcline(struct hist_entry *he)
291 {
292 struct map *map = he->ms.map;
293
294 if (!map)
295 return SRCLINE_UNKNOWN;
296
297 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
298 he->ms.sym, true);
299 }
300
301 static int64_t
302 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
303 {
304 if (!left->srcline)
305 left->srcline = hist_entry__get_srcline(left);
306 if (!right->srcline)
307 right->srcline = hist_entry__get_srcline(right);
308
309 return strcmp(right->srcline, left->srcline);
310 }
311
312 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
313 size_t size, unsigned int width)
314 {
315 if (!he->srcline)
316 he->srcline = hist_entry__get_srcline(he);
317
318 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
319 }
320
321 struct sort_entry sort_srcline = {
322 .se_header = "Source:Line",
323 .se_cmp = sort__srcline_cmp,
324 .se_snprintf = hist_entry__srcline_snprintf,
325 .se_width_idx = HISTC_SRCLINE,
326 };
327
328 /* --sort srcfile */
329
330 static char no_srcfile[1];
331
332 static char *hist_entry__get_srcfile(struct hist_entry *e)
333 {
334 char *sf, *p;
335 struct map *map = e->ms.map;
336
337 if (!map)
338 return no_srcfile;
339
340 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
341 e->ms.sym, false, true);
342 if (!strcmp(sf, SRCLINE_UNKNOWN))
343 return no_srcfile;
344 p = strchr(sf, ':');
345 if (p && *sf) {
346 *p = 0;
347 return sf;
348 }
349 free(sf);
350 return no_srcfile;
351 }
352
353 static int64_t
354 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
355 {
356 if (!left->srcfile)
357 left->srcfile = hist_entry__get_srcfile(left);
358 if (!right->srcfile)
359 right->srcfile = hist_entry__get_srcfile(right);
360
361 return strcmp(right->srcfile, left->srcfile);
362 }
363
364 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
365 size_t size, unsigned int width)
366 {
367 if (!he->srcfile)
368 he->srcfile = hist_entry__get_srcfile(he);
369
370 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
371 }
372
373 struct sort_entry sort_srcfile = {
374 .se_header = "Source File",
375 .se_cmp = sort__srcfile_cmp,
376 .se_snprintf = hist_entry__srcfile_snprintf,
377 .se_width_idx = HISTC_SRCFILE,
378 };
379
380 /* --sort parent */
381
382 static int64_t
383 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
384 {
385 struct symbol *sym_l = left->parent;
386 struct symbol *sym_r = right->parent;
387
388 if (!sym_l || !sym_r)
389 return cmp_null(sym_l, sym_r);
390
391 return strcmp(sym_r->name, sym_l->name);
392 }
393
394 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
395 size_t size, unsigned int width)
396 {
397 return repsep_snprintf(bf, size, "%-*.*s", width, width,
398 he->parent ? he->parent->name : "[other]");
399 }
400
401 struct sort_entry sort_parent = {
402 .se_header = "Parent symbol",
403 .se_cmp = sort__parent_cmp,
404 .se_snprintf = hist_entry__parent_snprintf,
405 .se_width_idx = HISTC_PARENT,
406 };
407
408 /* --sort cpu */
409
410 static int64_t
411 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
412 {
413 return right->cpu - left->cpu;
414 }
415
416 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
417 size_t size, unsigned int width)
418 {
419 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
420 }
421
422 struct sort_entry sort_cpu = {
423 .se_header = "CPU",
424 .se_cmp = sort__cpu_cmp,
425 .se_snprintf = hist_entry__cpu_snprintf,
426 .se_width_idx = HISTC_CPU,
427 };
428
429 /* --sort socket */
430
431 static int64_t
432 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
433 {
434 return right->socket - left->socket;
435 }
436
437 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
438 size_t size, unsigned int width)
439 {
440 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
441 }
442
443 struct sort_entry sort_socket = {
444 .se_header = "Socket",
445 .se_cmp = sort__socket_cmp,
446 .se_snprintf = hist_entry__socket_snprintf,
447 .se_width_idx = HISTC_SOCKET,
448 };
449
450 /* --sort trace */
451
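/*
 * Format the tracepoint payload of a hist entry; callers cache the result
 * in he->trace_output so the (relatively expensive) formatting runs at
 * most once per entry.  The returned buffer comes from a libtraceevent
 * trace_seq and holds either the raw field dump (--raw-trace) or the
 * event's pretty-printed output.
 */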
452 static char *get_trace_output(struct hist_entry *he)
453 {
454 struct trace_seq seq;
455 struct perf_evsel *evsel;
456 struct pevent_record rec = {
457 .data = he->raw_data,
458 .size = he->raw_size,
459 };
460
461 evsel = hists_to_evsel(he->hists);
462
463 trace_seq_init(&seq);
464 if (symbol_conf.raw_trace) {
465 pevent_print_fields(&seq, he->raw_data, he->raw_size,
466 evsel->tp_format);
467 } else {
468 pevent_event_info(&seq, evsel->tp_format, &rec);
469 }
470 return seq.buffer;
471 }
472
473 static int64_t
474 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
475 {
476 struct perf_evsel *evsel;
477
478 evsel = hists_to_evsel(left->hists);
479 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
480 return 0;
481
482 if (left->trace_output == NULL)
483 left->trace_output = get_trace_output(left);
484 if (right->trace_output == NULL)
485 right->trace_output = get_trace_output(right);
486
487 return strcmp(right->trace_output, left->trace_output);
488 }
489
490 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
491 size_t size, unsigned int width)
492 {
493 struct perf_evsel *evsel;
494
495 evsel = hists_to_evsel(he->hists);
496 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
497 return scnprintf(bf, size, "%-.*s", width, "N/A");
498
499 if (he->trace_output == NULL)
500 he->trace_output = get_trace_output(he);
501 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
502 }
503
504 struct sort_entry sort_trace = {
505 .se_header = "Trace output",
506 .se_cmp = sort__trace_cmp,
507 .se_snprintf = hist_entry__trace_snprintf,
508 .se_width_idx = HISTC_TRACE,
509 };
510
511 /* sort keys for branch stacks */
512
513 static int64_t
514 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
515 {
516 if (!left->branch_info || !right->branch_info)
517 return cmp_null(left->branch_info, right->branch_info);
518
519 return _sort__dso_cmp(left->branch_info->from.map,
520 right->branch_info->from.map);
521 }
522
523 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
524 size_t size, unsigned int width)
525 {
526 if (he->branch_info)
527 return _hist_entry__dso_snprintf(he->branch_info->from.map,
528 bf, size, width);
529 else
530 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
531 }
532
533 static int64_t
534 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
535 {
536 if (!left->branch_info || !right->branch_info)
537 return cmp_null(left->branch_info, right->branch_info);
538
539 return _sort__dso_cmp(left->branch_info->to.map,
540 right->branch_info->to.map);
541 }
542
543 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
544 size_t size, unsigned int width)
545 {
546 if (he->branch_info)
547 return _hist_entry__dso_snprintf(he->branch_info->to.map,
548 bf, size, width);
549 else
550 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
551 }
552
553 static int64_t
554 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
555 {
556 struct addr_map_symbol *from_l;
557 struct addr_map_symbol *from_r;
558
559 if (!left->branch_info || !right->branch_info)
560 return cmp_null(left->branch_info, right->branch_info);
561
562 from_l = &left->branch_info->from;
563 from_r = &right->branch_info->from;
564
565 if (!from_l->sym && !from_r->sym)
566 return _sort__addr_cmp(from_l->addr, from_r->addr);
567
568 return _sort__sym_cmp(from_l->sym, from_r->sym);
569 }
570
571 static int64_t
572 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
573 {
574 struct addr_map_symbol *to_l, *to_r;
575
576 if (!left->branch_info || !right->branch_info)
577 return cmp_null(left->branch_info, right->branch_info);
578
579 to_l = &left->branch_info->to;
580 to_r = &right->branch_info->to;
581
582 if (!to_l->sym && !to_r->sym)
583 return _sort__addr_cmp(to_l->addr, to_r->addr);
584
585 return _sort__sym_cmp(to_l->sym, to_r->sym);
586 }
587
588 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
589 size_t size, unsigned int width)
590 {
591 if (he->branch_info) {
592 struct addr_map_symbol *from = &he->branch_info->from;
593
594 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
595 he->level, bf, size, width);
596 }
597
598 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
599 }
600
601 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
602 size_t size, unsigned int width)
603 {
604 if (he->branch_info) {
605 struct addr_map_symbol *to = &he->branch_info->to;
606
607 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
608 he->level, bf, size, width);
609 }
610
611 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
612 }
613
614 struct sort_entry sort_dso_from = {
615 .se_header = "Source Shared Object",
616 .se_cmp = sort__dso_from_cmp,
617 .se_snprintf = hist_entry__dso_from_snprintf,
618 .se_width_idx = HISTC_DSO_FROM,
619 };
620
621 struct sort_entry sort_dso_to = {
622 .se_header = "Target Shared Object",
623 .se_cmp = sort__dso_to_cmp,
624 .se_snprintf = hist_entry__dso_to_snprintf,
625 .se_width_idx = HISTC_DSO_TO,
626 };
627
628 struct sort_entry sort_sym_from = {
629 .se_header = "Source Symbol",
630 .se_cmp = sort__sym_from_cmp,
631 .se_snprintf = hist_entry__sym_from_snprintf,
632 .se_width_idx = HISTC_SYMBOL_FROM,
633 };
634
635 struct sort_entry sort_sym_to = {
636 .se_header = "Target Symbol",
637 .se_cmp = sort__sym_to_cmp,
638 .se_snprintf = hist_entry__sym_to_snprintf,
639 .se_width_idx = HISTC_SYMBOL_TO,
640 };
641
642 static int64_t
643 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
644 {
645 unsigned char mp, p;
646
647 if (!left->branch_info || !right->branch_info)
648 return cmp_null(left->branch_info, right->branch_info);
649
650 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
651 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
652 return mp || p;
653 }
654
655 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
656 size_t size, unsigned int width)
{
657 static const char *out = "N/A";
658
659 if (he->branch_info) {
660 if (he->branch_info->flags.predicted)
661 out = "N";
662 else if (he->branch_info->flags.mispred)
663 out = "Y";
664 }
665
666 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
667 }
668
669 static int64_t
670 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
671 {
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);

672 return left->branch_info->flags.cycles -
673 right->branch_info->flags.cycles;
674 }
675
676 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
677 size_t size, unsigned int width)
678 {
679 if (!he->branch_info || he->branch_info->flags.cycles == 0)
680 return repsep_snprintf(bf, size, "%-*s", width, "-");
681 return repsep_snprintf(bf, size, "%-*hd", width,
682 he->branch_info->flags.cycles);
683 }
684
685 struct sort_entry sort_cycles = {
686 .se_header = "Basic Block Cycles",
687 .se_cmp = sort__cycles_cmp,
688 .se_snprintf = hist_entry__cycles_snprintf,
689 .se_width_idx = HISTC_CYCLES,
690 };
691
692 /* --sort daddr_sym */
693 static int64_t
694 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
695 {
696 uint64_t l = 0, r = 0;
697
698 if (left->mem_info)
699 l = left->mem_info->daddr.addr;
700 if (right->mem_info)
701 r = right->mem_info->daddr.addr;
702
703 return (int64_t)(r - l);
704 }
705
706 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
707 size_t size, unsigned int width)
708 {
709 uint64_t addr = 0;
710 struct map *map = NULL;
711 struct symbol *sym = NULL;
712
713 if (he->mem_info) {
714 addr = he->mem_info->daddr.addr;
715 map = he->mem_info->daddr.map;
716 sym = he->mem_info->daddr.sym;
717 }
718 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
719 width);
720 }
721
722 static int64_t
723 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
724 {
725 uint64_t l = 0, r = 0;
726
727 if (left->mem_info)
728 l = left->mem_info->iaddr.addr;
729 if (right->mem_info)
730 r = right->mem_info->iaddr.addr;
731
732 return (int64_t)(r - l);
733 }
734
735 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
736 size_t size, unsigned int width)
737 {
738 uint64_t addr = 0;
739 struct map *map = NULL;
740 struct symbol *sym = NULL;
741
742 if (he->mem_info) {
743 addr = he->mem_info->iaddr.addr;
744 map = he->mem_info->iaddr.map;
745 sym = he->mem_info->iaddr.sym;
746 }
747 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
748 width);
749 }
750
751 static int64_t
752 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
753 {
754 struct map *map_l = NULL;
755 struct map *map_r = NULL;
756
757 if (left->mem_info)
758 map_l = left->mem_info->daddr.map;
759 if (right->mem_info)
760 map_r = right->mem_info->daddr.map;
761
762 return _sort__dso_cmp(map_l, map_r);
763 }
764
765 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
766 size_t size, unsigned int width)
767 {
768 struct map *map = NULL;
769
770 if (he->mem_info)
771 map = he->mem_info->daddr.map;
772
773 return _hist_entry__dso_snprintf(map, bf, size, width);
774 }
775
776 static int64_t
777 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
778 {
779 union perf_mem_data_src data_src_l;
780 union perf_mem_data_src data_src_r;
781
782 if (left->mem_info)
783 data_src_l = left->mem_info->data_src;
784 else
785 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
786
787 if (right->mem_info)
788 data_src_r = right->mem_info->data_src;
789 else
790 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
791
792 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
793 }
794
795 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
796 size_t size, unsigned int width)
797 {
798 const char *out;
799 u64 mask = PERF_MEM_LOCK_NA;
800
801 if (he->mem_info)
802 mask = he->mem_info->data_src.mem_lock;
803
804 if (mask & PERF_MEM_LOCK_NA)
805 out = "N/A";
806 else if (mask & PERF_MEM_LOCK_LOCKED)
807 out = "Yes";
808 else
809 out = "No";
810
811 return repsep_snprintf(bf, size, "%.*s", width, out);
812 }
813
814 static int64_t
815 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
816 {
817 union perf_mem_data_src data_src_l;
818 union perf_mem_data_src data_src_r;
819
820 if (left->mem_info)
821 data_src_l = left->mem_info->data_src;
822 else
823 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
824
825 if (right->mem_info)
826 data_src_r = right->mem_info->data_src;
827 else
828 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
829
830 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
831 }
832
833 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
834 size_t size, unsigned int width)
835 {
836 char out[64];
837
838 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
839 return repsep_snprintf(bf, size, "%-*s", width, out);
840 }
841
842 static int64_t
843 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
844 {
845 union perf_mem_data_src data_src_l;
846 union perf_mem_data_src data_src_r;
847
848 if (left->mem_info)
849 data_src_l = left->mem_info->data_src;
850 else
851 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
852
853 if (right->mem_info)
854 data_src_r = right->mem_info->data_src;
855 else
856 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
857
858 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
859 }
860
861 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
862 size_t size, unsigned int width)
863 {
864 char out[64];
865
866 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
867 return repsep_snprintf(bf, size, "%-*s", width, out);
868 }
869
870 static int64_t
871 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
872 {
873 union perf_mem_data_src data_src_l;
874 union perf_mem_data_src data_src_r;
875
876 if (left->mem_info)
877 data_src_l = left->mem_info->data_src;
878 else
879 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
880
881 if (right->mem_info)
882 data_src_r = right->mem_info->data_src;
883 else
884 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
885
886 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
887 }
888
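/*
 * Indexed by bit position in data_src.mem_snoop; when more than one bit
 * is set the names are joined with " or " by hist_entry__snoop_snprintf().
 */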
889 static const char * const snoop_access[] = {
890 "N/A",
891 "None",
892 "Miss",
893 "Hit",
894 "HitM",
895 };
896
897 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
898 size_t size, unsigned int width)
899 {
900 char out[64];
901 size_t sz = sizeof(out) - 1; /* -1 for null termination */
902 size_t i, l = 0;
903 u64 m = PERF_MEM_SNOOP_NA;
904
905 out[0] = '\0';
906
907 if (he->mem_info)
908 m = he->mem_info->data_src.mem_snoop;
909
910 for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
911 if (!(m & 0x1))
912 continue;
913 if (l) {
914 strcat(out, " or ");
915 l += 4;
916 }
917 strncat(out, snoop_access[i], sz - l);
918 l += strlen(snoop_access[i]);
919 }
920
921 if (*out == '\0')
922 strcpy(out, "N/A");
923
924 return repsep_snprintf(bf, size, "%-*s", width, out);
925 }
926
927 static int64_t
928 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
929 {
930 u64 l, r;
931 struct map *l_map, *r_map;
932
933 if (!left->mem_info) return -1;
934 if (!right->mem_info) return 1;
935
936 /* group event types together */
937 if (left->cpumode > right->cpumode) return -1;
938 if (left->cpumode < right->cpumode) return 1;
939
940 l_map = left->mem_info->daddr.map;
941 r_map = right->mem_info->daddr.map;
942
943 /* if both are NULL, jump to sort on al_addr instead */
944 if (!l_map && !r_map)
945 goto addr;
946
947 if (!l_map) return -1;
948 if (!r_map) return 1;
949
950 if (l_map->maj > r_map->maj) return -1;
951 if (l_map->maj < r_map->maj) return 1;
952
953 if (l_map->min > r_map->min) return -1;
954 if (l_map->min < r_map->min) return 1;
955
956 if (l_map->ino > r_map->ino) return -1;
957 if (l_map->ino < r_map->ino) return 1;
958
959 if (l_map->ino_generation > r_map->ino_generation) return -1;
960 if (l_map->ino_generation < r_map->ino_generation) return 1;
961
962 /*
963 * Addresses with no major/minor numbers are assumed to be
964 * anonymous in userspace. Sort those on pid then address.
965 *
966 * The kernel and non-zero major/minor mapped areas are
967 * assumed to be unity mapped. Sort those on address.
968 */
969
970 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
971 (!(l_map->flags & MAP_SHARED)) &&
972 !l_map->maj && !l_map->min && !l_map->ino &&
973 !l_map->ino_generation) {
974 /* userspace anonymous */
975
976 if (left->thread->pid_ > right->thread->pid_) return -1;
977 if (left->thread->pid_ < right->thread->pid_) return 1;
978 }
979
980 addr:
981 /* al_addr does all the right addr - start + offset calculations */
982 l = cl_address(left->mem_info->daddr.al_addr);
983 r = cl_address(right->mem_info->daddr.al_addr);
984
985 if (l > r) return -1;
986 if (l < r) return 1;
987
988 return 0;
989 }
990
991 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
992 size_t size, unsigned int width)
993 {
994
995 uint64_t addr = 0;
996 struct map *map = NULL;
997 struct symbol *sym = NULL;
998 char level = he->level;
999
1000 if (he->mem_info) {
1001 addr = cl_address(he->mem_info->daddr.al_addr);
1002 map = he->mem_info->daddr.map;
1003 sym = he->mem_info->daddr.sym;
1004
1005 /* print [s] for shared data mmaps */
1006 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1007 map && (map->type == MAP__VARIABLE) &&
1008 (map->flags & MAP_SHARED) &&
1009 (map->maj || map->min || map->ino ||
1010 map->ino_generation))
1011 level = 's';
1012 else if (!map)
1013 level = 'X';
1014 }
1015 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1016 width);
1017 }
1018
1019 struct sort_entry sort_mispredict = {
1020 .se_header = "Branch Mispredicted",
1021 .se_cmp = sort__mispredict_cmp,
1022 .se_snprintf = hist_entry__mispredict_snprintf,
1023 .se_width_idx = HISTC_MISPREDICT,
1024 };
1025
1026 static u64 he_weight(struct hist_entry *he)
1027 {
1028 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1029 }
1030
1031 static int64_t
1032 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1033 {
1034 return he_weight(left) - he_weight(right);
1035 }
1036
1037 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1038 size_t size, unsigned int width)
1039 {
1040 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1041 }
1042
1043 struct sort_entry sort_local_weight = {
1044 .se_header = "Local Weight",
1045 .se_cmp = sort__local_weight_cmp,
1046 .se_snprintf = hist_entry__local_weight_snprintf,
1047 .se_width_idx = HISTC_LOCAL_WEIGHT,
1048 };
1049
1050 static int64_t
1051 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1052 {
1053 return left->stat.weight - right->stat.weight;
1054 }
1055
1056 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1057 size_t size, unsigned int width)
1058 {
1059 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1060 }
1061
1062 struct sort_entry sort_global_weight = {
1063 .se_header = "Weight",
1064 .se_cmp = sort__global_weight_cmp,
1065 .se_snprintf = hist_entry__global_weight_snprintf,
1066 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1067 };
1068
1069 struct sort_entry sort_mem_daddr_sym = {
1070 .se_header = "Data Symbol",
1071 .se_cmp = sort__daddr_cmp,
1072 .se_snprintf = hist_entry__daddr_snprintf,
1073 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1074 };
1075
1076 struct sort_entry sort_mem_iaddr_sym = {
1077 .se_header = "Code Symbol",
1078 .se_cmp = sort__iaddr_cmp,
1079 .se_snprintf = hist_entry__iaddr_snprintf,
1080 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1081 };
1082
1083 struct sort_entry sort_mem_daddr_dso = {
1084 .se_header = "Data Object",
1085 .se_cmp = sort__dso_daddr_cmp,
1086 .se_snprintf = hist_entry__dso_daddr_snprintf,
1087 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1088 };
1089
1090 struct sort_entry sort_mem_locked = {
1091 .se_header = "Locked",
1092 .se_cmp = sort__locked_cmp,
1093 .se_snprintf = hist_entry__locked_snprintf,
1094 .se_width_idx = HISTC_MEM_LOCKED,
1095 };
1096
1097 struct sort_entry sort_mem_tlb = {
1098 .se_header = "TLB access",
1099 .se_cmp = sort__tlb_cmp,
1100 .se_snprintf = hist_entry__tlb_snprintf,
1101 .se_width_idx = HISTC_MEM_TLB,
1102 };
1103
1104 struct sort_entry sort_mem_lvl = {
1105 .se_header = "Memory access",
1106 .se_cmp = sort__lvl_cmp,
1107 .se_snprintf = hist_entry__lvl_snprintf,
1108 .se_width_idx = HISTC_MEM_LVL,
1109 };
1110
1111 struct sort_entry sort_mem_snoop = {
1112 .se_header = "Snoop",
1113 .se_cmp = sort__snoop_cmp,
1114 .se_snprintf = hist_entry__snoop_snprintf,
1115 .se_width_idx = HISTC_MEM_SNOOP,
1116 };
1117
1118 struct sort_entry sort_mem_dcacheline = {
1119 .se_header = "Data Cacheline",
1120 .se_cmp = sort__dcacheline_cmp,
1121 .se_snprintf = hist_entry__dcacheline_snprintf,
1122 .se_width_idx = HISTC_MEM_DCACHELINE,
1123 };
1124
1125 static int64_t
1126 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1127 {
1128 if (!left->branch_info || !right->branch_info)
1129 return cmp_null(left->branch_info, right->branch_info);
1130
1131 return left->branch_info->flags.abort !=
1132 right->branch_info->flags.abort;
1133 }
1134
1135 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1136 size_t size, unsigned int width)
1137 {
1138 static const char *out = "N/A";
1139
1140 if (he->branch_info) {
1141 if (he->branch_info->flags.abort)
1142 out = "A";
1143 else
1144 out = ".";
1145 }
1146
1147 return repsep_snprintf(bf, size, "%-*s", width, out);
1148 }
1149
1150 struct sort_entry sort_abort = {
1151 .se_header = "Transaction abort",
1152 .se_cmp = sort__abort_cmp,
1153 .se_snprintf = hist_entry__abort_snprintf,
1154 .se_width_idx = HISTC_ABORT,
1155 };
1156
1157 static int64_t
1158 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1159 {
1160 if (!left->branch_info || !right->branch_info)
1161 return cmp_null(left->branch_info, right->branch_info);
1162
1163 return left->branch_info->flags.in_tx !=
1164 right->branch_info->flags.in_tx;
1165 }
1166
1167 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1168 size_t size, unsigned int width)
1169 {
1170 static const char *out = "N/A";
1171
1172 if (he->branch_info) {
1173 if (he->branch_info->flags.in_tx)
1174 out = "T";
1175 else
1176 out = ".";
1177 }
1178
1179 return repsep_snprintf(bf, size, "%-*s", width, out);
1180 }
1181
1182 struct sort_entry sort_in_tx = {
1183 .se_header = "Branch in transaction",
1184 .se_cmp = sort__in_tx_cmp,
1185 .se_snprintf = hist_entry__in_tx_snprintf,
1186 .se_width_idx = HISTC_IN_TX,
1187 };
1188
1189 static int64_t
1190 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1191 {
1192 return left->transaction - right->transaction;
1193 }
1194
1195 static inline char *add_str(char *p, const char *str)
1196 {
1197 strcpy(p, str);
1198 return p + strlen(str);
1199 }
1200
1201 static struct txbit {
1202 unsigned flag;
1203 const char *name;
1204 int skip_for_len;
1205 } txbits[] = {
1206 { PERF_TXN_ELISION, "EL ", 0 },
1207 { PERF_TXN_TRANSACTION, "TX ", 1 },
1208 { PERF_TXN_SYNC, "SYNC ", 1 },
1209 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1210 { PERF_TXN_RETRY, "RETRY ", 0 },
1211 { PERF_TXN_CONFLICT, "CON ", 0 },
1212 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1213 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1214 { 0, NULL, 0 }
1215 };
1216
1217 int hist_entry__transaction_len(void)
1218 {
1219 int i;
1220 int len = 0;
1221
1222 for (i = 0; txbits[i].name; i++) {
1223 if (!txbits[i].skip_for_len)
1224 len += strlen(txbits[i].name);
1225 }
1226 len += 4; /* :XX<space> */
1227 return len;
1228 }
1229
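/*
 * Illustrative output: a sample with PERF_TXN_TRANSACTION, PERF_TXN_SYNC
 * and PERF_TXN_CAPACITY_WRITE set and an abort code of 3 is rendered as
 * "TX SYNC CAP-WRITE :3"; "NEITHER " marks transactions that are neither
 * sync nor async.
 */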
1230 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1231 size_t size, unsigned int width)
1232 {
1233 u64 t = he->transaction;
1234 char buf[128];
1235 char *p = buf;
1236 int i;
1237
1238 buf[0] = 0;
1239 for (i = 0; txbits[i].name; i++)
1240 if (txbits[i].flag & t)
1241 p = add_str(p, txbits[i].name);
1242 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1243 p = add_str(p, "NEITHER ");
1244 if (t & PERF_TXN_ABORT_MASK) {
1245 sprintf(p, ":%" PRIx64,
1246 (t & PERF_TXN_ABORT_MASK) >>
1247 PERF_TXN_ABORT_SHIFT);
1248 p += strlen(p);
1249 }
1250
1251 return repsep_snprintf(bf, size, "%-*s", width, buf);
1252 }
1253
1254 struct sort_entry sort_transaction = {
1255 .se_header = "Transaction ",
1256 .se_cmp = sort__transaction_cmp,
1257 .se_snprintf = hist_entry__transaction_snprintf,
1258 .se_width_idx = HISTC_TRANSACTION,
1259 };
1260
1261 struct sort_dimension {
1262 const char *name;
1263 struct sort_entry *entry;
1264 int taken;
1265 };
1266
1267 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1268
1269 static struct sort_dimension common_sort_dimensions[] = {
1270 DIM(SORT_PID, "pid", sort_thread),
1271 DIM(SORT_COMM, "comm", sort_comm),
1272 DIM(SORT_DSO, "dso", sort_dso),
1273 DIM(SORT_SYM, "symbol", sort_sym),
1274 DIM(SORT_PARENT, "parent", sort_parent),
1275 DIM(SORT_CPU, "cpu", sort_cpu),
1276 DIM(SORT_SOCKET, "socket", sort_socket),
1277 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1278 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1279 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1280 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1281 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1282 DIM(SORT_TRACE, "trace", sort_trace),
1283 };
1284
1285 #undef DIM
1286
1287 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1288
1289 static struct sort_dimension bstack_sort_dimensions[] = {
1290 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1291 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1292 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1293 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1294 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1295 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1296 DIM(SORT_ABORT, "abort", sort_abort),
1297 DIM(SORT_CYCLES, "cycles", sort_cycles),
1298 };
1299
1300 #undef DIM
1301
1302 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1303
1304 static struct sort_dimension memory_sort_dimensions[] = {
1305 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1306 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1307 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1308 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1309 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1310 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1311 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1312 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1313 };
1314
1315 #undef DIM
1316
1317 struct hpp_dimension {
1318 const char *name;
1319 struct perf_hpp_fmt *fmt;
1320 int taken;
1321 };
1322
1323 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1324
1325 static struct hpp_dimension hpp_sort_dimensions[] = {
1326 DIM(PERF_HPP__OVERHEAD, "overhead"),
1327 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1328 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1329 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1330 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1331 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1332 DIM(PERF_HPP__SAMPLES, "sample"),
1333 DIM(PERF_HPP__PERIOD, "period"),
1334 };
1335
1336 #undef DIM
1337
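/*
 * Adapter exposing a classic struct sort_entry through the generic
 * perf_hpp_fmt column interface, so sort keys and overhead/period columns
 * can be mixed freely in the sort and output lists.
 */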
1338 struct hpp_sort_entry {
1339 struct perf_hpp_fmt hpp;
1340 struct sort_entry *se;
1341 };
1342
1343 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1344 {
1345 struct hpp_sort_entry *hse;
1346
1347 if (!perf_hpp__is_sort_entry(fmt))
1348 return;
1349
1350 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1351 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1352 }
1353
1354 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1355 struct perf_evsel *evsel)
1356 {
1357 struct hpp_sort_entry *hse;
1358 size_t len = fmt->user_len;
1359
1360 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1361
1362 if (!len)
1363 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1364
1365 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1366 }
1367
1368 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1369 struct perf_hpp *hpp __maybe_unused,
1370 struct perf_evsel *evsel)
1371 {
1372 struct hpp_sort_entry *hse;
1373 size_t len = fmt->user_len;
1374
1375 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1376
1377 if (!len)
1378 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1379
1380 return len;
1381 }
1382
1383 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1384 struct hist_entry *he)
1385 {
1386 struct hpp_sort_entry *hse;
1387 size_t len = fmt->user_len;
1388
1389 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1390
1391 if (!len)
1392 len = hists__col_len(he->hists, hse->se->se_width_idx);
1393
1394 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1395 }
1396
1397 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1398 struct hist_entry *a, struct hist_entry *b)
1399 {
1400 struct hpp_sort_entry *hse;
1401
1402 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1403 return hse->se->se_cmp(a, b);
1404 }
1405
1406 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1407 struct hist_entry *a, struct hist_entry *b)
1408 {
1409 struct hpp_sort_entry *hse;
1410 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1411
1412 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1413 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1414 return collapse_fn(a, b);
1415 }
1416
1417 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1418 struct hist_entry *a, struct hist_entry *b)
1419 {
1420 struct hpp_sort_entry *hse;
1421 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1422
1423 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1424 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1425 return sort_fn(a, b);
1426 }
1427
1428 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1429 {
1430 return format->header == __sort__hpp_header;
1431 }
1432
1433 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1434 {
1435 struct hpp_sort_entry *hse_a;
1436 struct hpp_sort_entry *hse_b;
1437
1438 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1439 return false;
1440
1441 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1442 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1443
1444 return hse_a->se == hse_b->se;
1445 }
1446
1447 static void hse_free(struct perf_hpp_fmt *fmt)
1448 {
1449 struct hpp_sort_entry *hse;
1450
1451 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1452 free(hse);
1453 }
1454
1455 static struct hpp_sort_entry *
1456 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1457 {
1458 struct hpp_sort_entry *hse;
1459
1460 hse = malloc(sizeof(*hse));
1461 if (hse == NULL) {
1462 pr_err("Memory allocation failed\n");
1463 return NULL;
1464 }
1465
1466 hse->se = sd->entry;
1467 hse->hpp.name = sd->entry->se_header;
1468 hse->hpp.header = __sort__hpp_header;
1469 hse->hpp.width = __sort__hpp_width;
1470 hse->hpp.entry = __sort__hpp_entry;
1471 hse->hpp.color = NULL;
1472
1473 hse->hpp.cmp = __sort__hpp_cmp;
1474 hse->hpp.collapse = __sort__hpp_collapse;
1475 hse->hpp.sort = __sort__hpp_sort;
1476 hse->hpp.equal = __sort__hpp_equal;
1477 hse->hpp.free = hse_free;
1478
1479 INIT_LIST_HEAD(&hse->hpp.list);
1480 INIT_LIST_HEAD(&hse->hpp.sort_list);
1481 hse->hpp.elide = false;
1482 hse->hpp.len = 0;
1483 hse->hpp.user_len = 0;
1484
1485 return hse;
1486 }
1487
1488 static void hpp_free(struct perf_hpp_fmt *fmt)
1489 {
1490 free(fmt);
1491 }
1492
1493 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1494 {
1495 struct perf_hpp_fmt *fmt;
1496
1497 fmt = memdup(hd->fmt, sizeof(*fmt));
1498 if (fmt) {
1499 INIT_LIST_HEAD(&fmt->list);
1500 INIT_LIST_HEAD(&fmt->sort_list);
1501 fmt->free = hpp_free;
1502 }
1503
1504 return fmt;
1505 }
1506
1507 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1508 {
1509 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1510
1511 if (hse == NULL)
1512 return -1;
1513
1514 perf_hpp__register_sort_field(&hse->hpp);
1515 return 0;
1516 }
1517
1518 static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
1519 struct sort_dimension *sd)
1520 {
1521 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1522
1523 if (hse == NULL)
1524 return -1;
1525
1526 perf_hpp_list__column_register(list, &hse->hpp);
1527 return 0;
1528 }
1529
1530 struct hpp_dynamic_entry {
1531 struct perf_hpp_fmt hpp;
1532 struct perf_evsel *evsel;
1533 struct format_field *field;
1534 unsigned dynamic_len;
1535 bool raw_trace;
1536 };
1537
1538 static int hde_width(struct hpp_dynamic_entry *hde)
1539 {
1540 if (!hde->hpp.len) {
1541 int len = hde->dynamic_len;
1542 int namelen = strlen(hde->field->name);
1543 int fieldlen = hde->field->size;
1544
1545 if (namelen > len)
1546 len = namelen;
1547
1548 if (!(hde->field->flags & FIELD_IS_STRING)) {
1549 /* length for printing hex numbers */
1550 fieldlen = hde->field->size * 2 + 2;
1551 }
1552 if (fieldlen > len)
1553 len = fieldlen;
1554
1555 hde->hpp.len = len;
1556 }
1557 return hde->hpp.len;
1558 }
1559
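/*
 * The pretty-printed trace output is scanned as space-separated
 * "<name>=<value>" tokens (illustrative example: "prev_comm=swapper
 * prev_pid=0"); remember the widest value seen for this field so the
 * column can be sized to fit.
 */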
1560 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1561 struct hist_entry *he)
1562 {
1563 char *str, *pos;
1564 struct format_field *field = hde->field;
1565 size_t namelen;
1566 bool last = false;
1567
1568 if (hde->raw_trace)
1569 return;
1570
1571 /* parse pretty print result and update max length */
1572 if (!he->trace_output)
1573 he->trace_output = get_trace_output(he);
1574
1575 namelen = strlen(field->name);
1576 str = he->trace_output;
1577
1578 while (str) {
1579 pos = strchr(str, ' ');
1580 if (pos == NULL) {
1581 last = true;
1582 pos = str + strlen(str);
1583 }
1584
1585 if (!strncmp(str, field->name, namelen)) {
1586 size_t len;
1587
1588 str += namelen + 1;
1589 len = pos - str;
1590
1591 if (len > hde->dynamic_len)
1592 hde->dynamic_len = len;
1593 break;
1594 }
1595
1596 if (last)
1597 str = NULL;
1598 else
1599 str = pos + 1;
1600 }
1601 }
1602
1603 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1604 struct perf_evsel *evsel __maybe_unused)
1605 {
1606 struct hpp_dynamic_entry *hde;
1607 size_t len = fmt->user_len;
1608
1609 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1610
1611 if (!len)
1612 len = hde_width(hde);
1613
1614 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1615 }
1616
1617 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1618 struct perf_hpp *hpp __maybe_unused,
1619 struct perf_evsel *evsel __maybe_unused)
1620 {
1621 struct hpp_dynamic_entry *hde;
1622 size_t len = fmt->user_len;
1623
1624 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1625
1626 if (!len)
1627 len = hde_width(hde);
1628
1629 return len;
1630 }
1631
1632 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1633 {
1634 struct hpp_dynamic_entry *hde;
1635
1636 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1637
1638 return hists_to_evsel(hists) == hde->evsel;
1639 }
1640
1641 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1642 struct hist_entry *he)
1643 {
1644 struct hpp_dynamic_entry *hde;
1645 size_t len = fmt->user_len;
1646 char *str, *pos;
1647 struct format_field *field;
1648 size_t namelen;
1649 bool last = false;
1650 int ret;
1651
1652 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1653
1654 if (!len)
1655 len = hde_width(hde);
1656
1657 if (hde->raw_trace)
1658 goto raw_field;
1659
1660 field = hde->field;
1661 namelen = strlen(field->name);
1662 str = he->trace_output;
1663
1664 while (str) {
1665 pos = strchr(str, ' ');
1666 if (pos == NULL) {
1667 last = true;
1668 pos = str + strlen(str);
1669 }
1670
1671 if (!strncmp(str, field->name, namelen)) {
1672 str += namelen + 1;
1673 str = strndup(str, pos - str);
1674
1675 if (str == NULL)
1676 return scnprintf(hpp->buf, hpp->size,
1677 "%*.*s", len, len, "ERROR");
1678 break;
1679 }
1680
1681 if (last)
1682 str = NULL;
1683 else
1684 str = pos + 1;
1685 }
1686
1687 if (str == NULL) {
1688 struct trace_seq seq;
1689 raw_field:
1690 trace_seq_init(&seq);
1691 pevent_print_field(&seq, he->raw_data, hde->field);
1692 str = seq.buffer;
1693 }
1694
1695 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1696 free(str);
1697 return ret;
1698 }
1699
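/*
 * For FIELD_IS_DYNAMIC (__data_loc) fields the recorded number only
 * locates the payload: offset in the low 16 bits, length in the high
 * 16 bits, hence the unpacking before the memcmp() below.
 */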
1700 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1701 struct hist_entry *a, struct hist_entry *b)
1702 {
1703 struct hpp_dynamic_entry *hde;
1704 struct format_field *field;
1705 unsigned offset, size;
1706
1707 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1708
1709 field = hde->field;
1710 if (field->flags & FIELD_IS_DYNAMIC) {
1711 unsigned long long dyn;
1712
1713 pevent_read_number_field(field, a->raw_data, &dyn);
1714 offset = dyn & 0xffff;
1715 size = (dyn >> 16) & 0xffff;
1716
1717 /* record max width for output */
1718 if (size > hde->dynamic_len)
1719 hde->dynamic_len = size;
1720 } else {
1721 offset = field->offset;
1722 size = field->size;
1723
1724 update_dynamic_len(hde, a);
1725 update_dynamic_len(hde, b);
1726 }
1727
1728 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1729 }
1730
1731 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1732 {
1733 return fmt->cmp == __sort__hde_cmp;
1734 }
1735
1736 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1737 {
1738 struct hpp_dynamic_entry *hde_a;
1739 struct hpp_dynamic_entry *hde_b;
1740
1741 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1742 return false;
1743
1744 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1745 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1746
1747 return hde_a->field == hde_b->field;
1748 }
1749
1750 static void hde_free(struct perf_hpp_fmt *fmt)
1751 {
1752 struct hpp_dynamic_entry *hde;
1753
1754 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1755 free(hde);
1756 }
1757
1758 static struct hpp_dynamic_entry *
1759 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1760 {
1761 struct hpp_dynamic_entry *hde;
1762
1763 hde = malloc(sizeof(*hde));
1764 if (hde == NULL) {
1765 pr_debug("Memory allocation failed\n");
1766 return NULL;
1767 }
1768
1769 hde->evsel = evsel;
1770 hde->field = field;
1771 hde->dynamic_len = 0;
1772
1773 hde->hpp.name = field->name;
1774 hde->hpp.header = __sort__hde_header;
1775 hde->hpp.width = __sort__hde_width;
1776 hde->hpp.entry = __sort__hde_entry;
1777 hde->hpp.color = NULL;
1778
1779 hde->hpp.cmp = __sort__hde_cmp;
1780 hde->hpp.collapse = __sort__hde_cmp;
1781 hde->hpp.sort = __sort__hde_cmp;
1782 hde->hpp.equal = __sort__hde_equal;
1783 hde->hpp.free = hde_free;
1784
1785 INIT_LIST_HEAD(&hde->hpp.list);
1786 INIT_LIST_HEAD(&hde->hpp.sort_list);
1787 hde->hpp.elide = false;
1788 hde->hpp.len = 0;
1789 hde->hpp.user_len = 0;
1790
1791 return hde;
1792 }
1793
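/*
 * Splits the token in place; e.g. "sched:sched_switch.prev_pid/raw" yields
 * event "sched:sched_switch", field "prev_pid" and option "raw".  Without
 * a '.' the whole token is taken as a field name and *event is set to NULL.
 */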
1794 static int parse_field_name(char *str, char **event, char **field, char **opt)
1795 {
1796 char *event_name, *field_name, *opt_name;
1797
1798 event_name = str;
1799 field_name = strchr(str, '.');
1800
1801 if (field_name) {
1802 *field_name++ = '\0';
1803 } else {
1804 event_name = NULL;
1805 field_name = str;
1806 }
1807
1808 opt_name = strchr(field_name, '/');
1809 if (opt_name)
1810 *opt_name++ = '\0';
1811
1812 *event = event_name;
1813 *field = field_name;
1814 *opt = opt_name;
1815
1816 return 0;
1817 }
1818
1819 /* Find the matching evsel for a given event name. The event name can be:
1820 * 1. '%' + event index (e.g. '%1' for first event)
1821 * 2. full event name (e.g. sched:sched_switch)
1822 * 3. partial event name (should not contain ':')
1823 */
1824 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1825 {
1826 struct perf_evsel *evsel = NULL;
1827 struct perf_evsel *pos;
1828 bool full_name;
1829
1830 /* case 1 */
1831 if (event_name[0] == '%') {
1832 int nr = strtol(event_name+1, NULL, 0);
1833
1834 if (nr > evlist->nr_entries)
1835 return NULL;
1836
1837 evsel = perf_evlist__first(evlist);
1838 while (--nr > 0)
1839 evsel = perf_evsel__next(evsel);
1840
1841 return evsel;
1842 }
1843
1844 full_name = !!strchr(event_name, ':');
1845 evlist__for_each(evlist, pos) {
1846 /* case 2 */
1847 if (full_name && !strcmp(pos->name, event_name))
1848 return pos;
1849 /* case 3 */
1850 if (!full_name && strstr(pos->name, event_name)) {
1851 if (evsel) {
1852 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1853 event_name, evsel->name, pos->name);
1854 return NULL;
1855 }
1856 evsel = pos;
1857 }
1858 }
1859
1860 return evsel;
1861 }
1862
1863 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1864 struct format_field *field,
1865 bool raw_trace)
1866 {
1867 struct hpp_dynamic_entry *hde;
1868
1869 hde = __alloc_dynamic_entry(evsel, field);
1870 if (hde == NULL)
1871 return -ENOMEM;
1872
1873 hde->raw_trace = raw_trace;
1874
1875 perf_hpp__register_sort_field(&hde->hpp);
1876 return 0;
1877 }
1878
1879 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1880 {
1881 int ret;
1882 struct format_field *field;
1883
1884 field = evsel->tp_format->format.fields;
1885 while (field) {
1886 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1887 if (ret < 0)
1888 return ret;
1889
1890 field = field->next;
1891 }
1892 return 0;
1893 }
1894
1895 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1896 {
1897 int ret;
1898 struct perf_evsel *evsel;
1899
1900 evlist__for_each(evlist, evsel) {
1901 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1902 continue;
1903
1904 ret = add_evsel_fields(evsel, raw_trace);
1905 if (ret < 0)
1906 return ret;
1907 }
1908 return 0;
1909 }
1910
1911 static int add_all_matching_fields(struct perf_evlist *evlist,
1912 char *field_name, bool raw_trace)
1913 {
1914 int ret = -ESRCH;
1915 struct perf_evsel *evsel;
1916 struct format_field *field;
1917
1918 evlist__for_each(evlist, evsel) {
1919 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1920 continue;
1921
1922 field = pevent_find_any_field(evsel->tp_format, field_name);
1923 if (field == NULL)
1924 continue;
1925
1926 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1927 if (ret < 0)
1928 break;
1929 }
1930 return ret;
1931 }
1932
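/*
 * Dynamic --sort tokens accepted here:
 *   "trace_fields"        - every field of every tracepoint event
 *   "<field>"             - <field> in any tracepoint event that has it
 *   "<event>.<field>"     - a single field of a single event
 *   "<event>.*"           - every field of a single event
 * An optional "/raw" suffix prints the field without pretty-printing.
 */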
1933 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1934 {
1935 char *str, *event_name, *field_name, *opt_name;
1936 struct perf_evsel *evsel;
1937 struct format_field *field;
1938 bool raw_trace = symbol_conf.raw_trace;
1939 int ret = 0;
1940
1941 if (evlist == NULL)
1942 return -ENOENT;
1943
1944 str = strdup(tok);
1945 if (str == NULL)
1946 return -ENOMEM;
1947
1948 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
1949 ret = -EINVAL;
1950 goto out;
1951 }
1952
1953 if (opt_name) {
1954 if (strcmp(opt_name, "raw")) {
1955 pr_debug("unsupported field option %s\n", opt_name);
1956 ret = -EINVAL;
1957 goto out;
1958 }
1959 raw_trace = true;
1960 }
1961
1962 if (!strcmp(field_name, "trace_fields")) {
1963 ret = add_all_dynamic_fields(evlist, raw_trace);
1964 goto out;
1965 }
1966
1967 if (event_name == NULL) {
1968 ret = add_all_matching_fields(evlist, field_name, raw_trace);
1969 goto out;
1970 }
1971
1972 evsel = find_evsel(evlist, event_name);
1973 if (evsel == NULL) {
1974 pr_debug("Cannot find event: %s\n", event_name);
1975 ret = -ENOENT;
1976 goto out;
1977 }
1978
1979 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
1980 pr_debug("%s is not a tracepoint event\n", event_name);
1981 ret = -EINVAL;
1982 goto out;
1983 }
1984
1985 if (!strcmp(field_name, "*")) {
1986 ret = add_evsel_fields(evsel, raw_trace);
1987 } else {
1988 field = pevent_find_any_field(evsel->tp_format, field_name);
1989 if (field == NULL) {
1990 pr_debug("Cannot find event field for %s.%s\n",
1991 event_name, field_name);
1992 ret = -ENOENT;
goto out;
1993 }
1994
1995 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1996 }
1997
1998 out:
1999 free(str);
2000 return ret;
2001 }
2002
2003 static int __sort_dimension__add(struct sort_dimension *sd)
2004 {
2005 if (sd->taken)
2006 return 0;
2007
2008 if (__sort_dimension__add_hpp_sort(sd) < 0)
2009 return -1;
2010
2011 if (sd->entry->se_collapse)
2012 sort__need_collapse = 1;
2013
2014 sd->taken = 1;
2015
2016 return 0;
2017 }
2018
2019 static int __hpp_dimension__add(struct hpp_dimension *hd)
2020 {
2021 struct perf_hpp_fmt *fmt;
2022
2023 if (hd->taken)
2024 return 0;
2025
2026 fmt = __hpp_dimension__alloc_hpp(hd);
2027 if (!fmt)
2028 return -1;
2029
2030 hd->taken = 1;
2031 perf_hpp__register_sort_field(fmt);
2032 return 0;
2033 }
2034
2035 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2036 struct sort_dimension *sd)
2037 {
2038 if (sd->taken)
2039 return 0;
2040
2041 if (__sort_dimension__add_hpp_output(list, sd) < 0)
2042 return -1;
2043
2044 sd->taken = 1;
2045 return 0;
2046 }
2047
2048 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2049 struct hpp_dimension *hd)
2050 {
2051 struct perf_hpp_fmt *fmt;
2052
2053 if (hd->taken)
2054 return 0;
2055
2056 fmt = __hpp_dimension__alloc_hpp(hd);
2057 if (!fmt)
2058 return -1;
2059
2060 hd->taken = 1;
2061 perf_hpp_list__column_register(list, fmt);
2062 return 0;
2063 }
2064
2065 int hpp_dimension__add_output(unsigned col)
2066 {
2067 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2068 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2069 }
2070
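/*
 * A --sort token is matched, in order, against the common dimensions, the
 * hpp (overhead/period) columns, the branch-stack keys (branch mode only),
 * the memory keys (memory mode only) and finally dynamic tracepoint fields.
 */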
2071 static int sort_dimension__add(const char *tok,
2072 struct perf_evlist *evlist __maybe_unused)
2073 {
2074 unsigned int i;
2075
2076 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2077 struct sort_dimension *sd = &common_sort_dimensions[i];
2078
2079 if (strncasecmp(tok, sd->name, strlen(tok)))
2080 continue;
2081
2082 if (sd->entry == &sort_parent) {
2083 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2084 if (ret) {
2085 char err[BUFSIZ];
2086
2087 regerror(ret, &parent_regex, err, sizeof(err));
2088 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2089 return -EINVAL;
2090 }
2091 sort__has_parent = 1;
2092 } else if (sd->entry == &sort_sym) {
2093 sort__has_sym = 1;
2094 /*
2095 * perf diff displays the performance difference between
2096 * two or more perf.data files.  Those files could come
2097 * from different binaries, so we should not compare
2098 * their IPs, but the symbol names.
2099 */
2100 if (sort__mode == SORT_MODE__DIFF)
2101 sd->entry->se_collapse = sort__sym_sort;
2102
2103 } else if (sd->entry == &sort_dso) {
2104 sort__has_dso = 1;
2105 } else if (sd->entry == &sort_socket) {
2106 sort__has_socket = 1;
2107 } else if (sd->entry == &sort_thread) {
2108 sort__has_thread = 1;
2109 }
2110
2111 return __sort_dimension__add(sd);
2112 }
2113
2114 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2115 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2116
2117 if (strncasecmp(tok, hd->name, strlen(tok)))
2118 continue;
2119
2120 return __hpp_dimension__add(hd);
2121 }
2122
2123 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2124 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2125
2126 if (strncasecmp(tok, sd->name, strlen(tok)))
2127 continue;
2128
2129 if (sort__mode != SORT_MODE__BRANCH)
2130 return -EINVAL;
2131
2132 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2133 sort__has_sym = 1;
2134
2135 __sort_dimension__add(sd);
2136 return 0;
2137 }
2138
2139 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2140 struct sort_dimension *sd = &memory_sort_dimensions[i];
2141
2142 if (strncasecmp(tok, sd->name, strlen(tok)))
2143 continue;
2144
2145 if (sort__mode != SORT_MODE__MEMORY)
2146 return -EINVAL;
2147
2148 if (sd->entry == &sort_mem_daddr_sym)
2149 sort__has_sym = 1;
2150
2151 __sort_dimension__add(sd);
2152 return 0;
2153 }
2154
2155 if (!add_dynamic_entry(evlist, tok))
2156 return 0;
2157
2158 return -ESRCH;
2159 }
2160
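/* Split a comma/space separated --sort string and add each key in turn. */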
2161 static int setup_sort_list(char *str, struct perf_evlist *evlist)
2162 {
2163 char *tmp, *tok;
2164 int ret = 0;
2165
2166 for (tok = strtok_r(str, ", ", &tmp);
2167 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2168 ret = sort_dimension__add(tok, evlist);
2169 if (ret == -EINVAL) {
2170 error("Invalid --sort key: `%s'", tok);
2171 break;
2172 } else if (ret == -ESRCH) {
2173 error("Unknown --sort key: `%s'", tok);
2174 break;
2175 }
2176 }
2177
2178 return ret;
2179 }
2180
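/*
 * Pick the default sort order for the current sort mode.  If every event
 * in the evlist is a tracepoint, switch to tracepoint mode so the 'trace'
 * key (or 'trace_fields' for raw trace) is used instead.
 */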
2181 static const char *get_default_sort_order(struct perf_evlist *evlist)
2182 {
2183 const char *default_sort_orders[] = {
2184 default_sort_order,
2185 default_branch_sort_order,
2186 default_mem_sort_order,
2187 default_top_sort_order,
2188 default_diff_sort_order,
2189 default_tracepoint_sort_order,
2190 };
2191 bool use_trace = true;
2192 struct perf_evsel *evsel;
2193
2194 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2195
2196 if (evlist == NULL)
2197 goto out_no_evlist;
2198
2199 evlist__for_each(evlist, evsel) {
2200 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2201 use_trace = false;
2202 break;
2203 }
2204 }
2205
2206 if (use_trace) {
2207 sort__mode = SORT_MODE__TRACEPOINT;
2208 if (symbol_conf.raw_trace)
2209 return "trace_fields";
2210 }
2211 out_no_evlist:
2212 return default_sort_orders[sort__mode];
2213 }
2214
2215 static int setup_sort_order(struct perf_evlist *evlist)
2216 {
2217 char *new_sort_order;
2218
2219 /*
2220 * Append '+'-prefixed sort order to the default sort
2221 * order string.
2222 */
2223 if (!sort_order || is_strict_order(sort_order))
2224 return 0;
2225
2226 if (sort_order[1] == '\0') {
2227 error("Invalid --sort key: `+'");
2228 return -EINVAL;
2229 }
2230
2231 /*
2232 * We allocate a new sort_order string, but we never free it,
2233 * because it is referenced throughout the rest of the code.
2234 */
2235 if (asprintf(&new_sort_order, "%s,%s",
2236 get_default_sort_order(evlist), sort_order + 1) < 0) {
2237 error("Not enough memory to set up --sort");
2238 return -ENOMEM;
2239 }
2240
2241 sort_order = new_sort_order;
2242 return 0;
2243 }
2244
2245 /*
2246 * Adds the 'pre,' prefix to 'str' if 'pre' is
2247 * not already part of 'str'.
2248 */
2249 static char *prefix_if_not_in(const char *pre, char *str)
2250 {
2251 char *n;
2252
2253 if (!str || strstr(str, pre))
2254 return str;
2255
2256 if (asprintf(&n, "%s,%s", pre, str) < 0) {
2257 free(str); /* don't leak 'str' when asprintf() fails */
return NULL;
}
2258
2259 free(str);
2260 return n;
2261 }
2262
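/*
 * Make sure the overhead key (and 'overhead_children' when cumulative
 * callchains are enabled) leads the sort keys.
 */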
2263 static char *setup_overhead(char *keys)
2264 {
2265 keys = prefix_if_not_in("overhead", keys);
2266
2267 if (symbol_conf.cumulate_callchain)
2268 keys = prefix_if_not_in("overhead_children", keys);
2269
2270 return keys;
2271 }
2272
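/*
 * Build the sort key list: honor --sort if given, otherwise fall back to
 * the mode's default order (unless a strict --fields order was requested),
 * and prepend the overhead keys for backward compatibility.
 */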
2273 static int __setup_sorting(struct perf_evlist *evlist)
2274 {
2275 char *str;
2276 const char *sort_keys;
2277 int ret = 0;
2278
2279 ret = setup_sort_order(evlist);
2280 if (ret)
2281 return ret;
2282
2283 sort_keys = sort_order;
2284 if (sort_keys == NULL) {
2285 if (is_strict_order(field_order)) {
2286 /*
2287 * If user specified field order but no sort order,
2288 * we'll honor it and not add default sort orders.
2289 */
2290 return 0;
2291 }
2292
2293 sort_keys = get_default_sort_order(evlist);
2294 }
2295
2296 str = strdup(sort_keys);
2297 if (str == NULL) {
2298 error("Not enough memory to setup sort keys");
2299 return -ENOMEM;
2300 }
2301
2302 /*
2303 * Prepend overhead fields for backward compatibility.
2304 */
2305 if (!is_strict_order(field_order)) {
2306 str = setup_overhead(str);
2307 if (str == NULL) {
2308 error("Not enough memory to setup overhead keys");
2309 return -ENOMEM;
2310 }
2311 }
2312
2313 ret = setup_sort_list(str, evlist);
2314
2315 free(str);
2316 return ret;
2317 }
2318
2319 void perf_hpp__set_elide(int idx, bool elide)
2320 {
2321 struct perf_hpp_fmt *fmt;
2322 struct hpp_sort_entry *hse;
2323
2324 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2325 if (!perf_hpp__is_sort_entry(fmt))
2326 continue;
2327
2328 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2329 if (hse->se->se_width_idx == idx) {
2330 fmt->elide = elide;
2331 break;
2332 }
2333 }
2334 }
2335
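/*
 * A column can be elided when its filter list contains exactly one entry:
 * every hist entry would show the same value, so print it once as a
 * header comment instead.
 */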
2336 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2337 {
2338 if (list && strlist__nr_entries(list) == 1) {
2339 if (fp != NULL)
2340 fprintf(fp, "# %s: %s\n", list_name,
2341 strlist__entry(list, 0)->s);
2342 return true;
2343 }
2344 return false;
2345 }
2346
2347 static bool get_elide(int idx, FILE *output)
2348 {
2349 switch (idx) {
2350 case HISTC_SYMBOL:
2351 return __get_elide(symbol_conf.sym_list, "symbol", output);
2352 case HISTC_DSO:
2353 return __get_elide(symbol_conf.dso_list, "dso", output);
2354 case HISTC_COMM:
2355 return __get_elide(symbol_conf.comm_list, "comm", output);
2356 default:
2357 break;
2358 }
2359
2360 if (sort__mode != SORT_MODE__BRANCH)
2361 return false;
2362
2363 switch (idx) {
2364 case HISTC_SYMBOL_FROM:
2365 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2366 case HISTC_SYMBOL_TO:
2367 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2368 case HISTC_DSO_FROM:
2369 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2370 case HISTC_DSO_TO:
2371 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2372 default:
2373 break;
2374 }
2375
2376 return false;
2377 }
2378
2379 void sort__setup_elide(FILE *output)
2380 {
2381 struct perf_hpp_fmt *fmt;
2382 struct hpp_sort_entry *hse;
2383
2384 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2385 if (!perf_hpp__is_sort_entry(fmt))
2386 continue;
2387
2388 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2389 fmt->elide = get_elide(hse->se->se_width_idx, output);
2390 }
2391
2392 /*
2393 * It makes no sense to elide all of the sort entries.
2394 * Just revert them so they show up again.
2395 */
2396 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2397 if (!perf_hpp__is_sort_entry(fmt))
2398 continue;
2399
2400 if (!fmt->elide)
2401 return;
2402 }
2403
2404 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2405 if (!perf_hpp__is_sort_entry(fmt))
2406 continue;
2407
2408 fmt->elide = false;
2409 }
2410 }
2411
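/*
 * Map a --fields token to an output column.  Unlike sort keys, the branch
 * and memory dimensions are accepted regardless of the sort mode.
 */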
2412 static int output_field_add(struct perf_hpp_list *list, char *tok)
2413 {
2414 unsigned int i;
2415
2416 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2417 struct sort_dimension *sd = &common_sort_dimensions[i];
2418
2419 if (strncasecmp(tok, sd->name, strlen(tok)))
2420 continue;
2421
2422 return __sort_dimension__add_output(list, sd);
2423 }
2424
2425 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2426 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2427
2428 if (strncasecmp(tok, hd->name, strlen(tok)))
2429 continue;
2430
2431 return __hpp_dimension__add_output(list, hd);
2432 }
2433
2434 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2435 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2436
2437 if (strncasecmp(tok, sd->name, strlen(tok)))
2438 continue;
2439
2440 return __sort_dimension__add_output(list, sd);
2441 }
2442
2443 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2444 struct sort_dimension *sd = &memory_sort_dimensions[i];
2445
2446 if (strncasecmp(tok, sd->name, strlen(tok)))
2447 continue;
2448
2449 return __sort_dimension__add_output(list, sd);
2450 }
2451
2452 return -ESRCH;
2453 }
2454
2455 static int setup_output_list(struct perf_hpp_list *list, char *str)
2456 {
2457 char *tmp, *tok;
2458 int ret = 0;
2459
2460 for (tok = strtok_r(str, ", ", &tmp);
2461 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2462 ret = output_field_add(list, tok);
2463 if (ret == -EINVAL) {
2464 error("Invalid --fields key: `%s'", tok);
2465 break;
2466 } else if (ret == -ESRCH) {
2467 error("Unknown --fields key: `%s'", tok);
2468 break;
2469 }
2470 }
2471
2472 return ret;
2473 }
2474
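/* Clear the 'taken' flags so dimensions can be reused for output setup. */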
2475 static void reset_dimensions(void)
2476 {
2477 unsigned int i;
2478
2479 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2480 common_sort_dimensions[i].taken = 0;
2481
2482 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2483 hpp_sort_dimensions[i].taken = 0;
2484
2485 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2486 bstack_sort_dimensions[i].taken = 0;
2487
2488 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2489 memory_sort_dimensions[i].taken = 0;
2490 }
2491
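/* An order not starting with '+' is strict: it replaces the default keys instead of appending to them. */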
2492 bool is_strict_order(const char *order)
2493 {
2494 return order && (*order != '+');
2495 }
2496
2497 static int __setup_output_field(void)
2498 {
2499 char *str, *strp;
2500 int ret = -EINVAL;
2501
2502 if (field_order == NULL)
2503 return 0;
2504
2505 strp = str = strdup(field_order);
2506 if (str == NULL) {
2507 error("Not enough memory to setup output fields");
2508 return -ENOMEM;
2509 }
2510
2511 if (!is_strict_order(field_order))
2512 strp++;
2513
2514 if (!strlen(strp)) {
2515 error("Invalid --fields key: `+'");
2516 goto out;
2517 }
2518
2519 ret = setup_output_list(&perf_hpp_list, strp);
2520
2521 out:
2522 free(str);
2523 return ret;
2524 }
2525
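/*
 * Main entry point for sort/output setup: parse the sort keys, add the
 * parent key when a non-default parent pattern was given, then parse
 * --fields and sync the sort and output field lists.
 */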
2526 int setup_sorting(struct perf_evlist *evlist)
2527 {
2528 int err;
2529
2530 err = __setup_sorting(evlist);
2531 if (err < 0)
2532 return err;
2533
2534 if (parent_pattern != default_parent_pattern) {
2535 err = sort_dimension__add("parent", evlist);
2536 if (err < 0)
2537 return err;
2538 }
2539
2540 reset_dimensions();
2541
2542 /*
2543 * perf diff doesn't use default hpp output fields.
2544 */
2545 if (sort__mode != SORT_MODE__DIFF)
2546 perf_hpp__init();
2547
2548 err = __setup_output_field();
2549 if (err < 0)
2550 return err;
2551
2552 /* copy sort keys to output fields */
2553 perf_hpp__setup_output_field(&perf_hpp_list);
2554 /* and then copy output fields to sort keys */
2555 perf_hpp__append_sort_keys(&perf_hpp_list);
2556
2557 return 0;
2558 }
2559
2560 void reset_output_field(void)
2561 {
2562 sort__need_collapse = 0;
2563 sort__has_parent = 0;
2564 sort__has_sym = 0;
2565 sort__has_dso = 0;
2566
2567 field_order = NULL;
2568 sort_order = NULL;
2569
2570 reset_dimensions();
2571 perf_hpp__reset_output_field(&perf_hpp_list);
2572 }