perf tools: Fix column width setting on 'trace' sort key
[deliverable/linux.git] tools/perf/util/sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9
10 regex_t parent_regex;
11 const char default_parent_pattern[] = "^sys_|^do_page_fault";
12 const char *parent_pattern = default_parent_pattern;
13 const char default_sort_order[] = "comm,dso,symbol";
14 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order[] = "dso,symbol";
17 const char default_diff_sort_order[] = "dso,symbol";
18 const char default_tracepoint_sort_order[] = "trace";
19 const char *sort_order;
20 const char *field_order;
21 regex_t ignore_callees_regex;
22 int have_ignore_callees = 0;
23 int sort__need_collapse = 0;
24 int sort__has_parent = 0;
25 int sort__has_sym = 0;
26 int sort__has_dso = 0;
27 int sort__has_socket = 0;
28 int sort__has_thread = 0;
29 enum sort_mode sort__mode = SORT_MODE__NORMAL;
30
31 /*
32  * Replaces all occurrences of the character used with the:
33  *
34  * -t, --field-separator
35  *
36  * option, which uses a special separator character and doesn't pad with spaces,
37  * replacing all occurrences of this separator in symbol names (and other
38  * output) with a '.' character, so that it becomes the only invalid separator.
39 */
40 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
41 {
42 int n;
43 va_list ap;
44
45 va_start(ap, fmt);
46 n = vsnprintf(bf, size, fmt, ap);
47 if (symbol_conf.field_sep && n > 0) {
48 char *sep = bf;
49
50 while (1) {
51 sep = strchr(sep, *symbol_conf.field_sep);
52 if (sep == NULL)
53 break;
54 *sep = '.';
55 }
56 }
57 va_end(ap);
58
59 if (n >= (int)size)
60 return size - 1;
61 return n;
62 }
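/*
 * Illustration (hypothetical values): with '-t :' so symbol_conf.field_sep
 * is ":", a thread column formatted as "1234:perf" leaves repsep_snprintf()
 * as "1234.perf", ensuring the separator character never appears inside a
 * field's own text.
 */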
63
64 static int64_t cmp_null(const void *l, const void *r)
65 {
66 if (!l && !r)
67 return 0;
68 else if (!l)
69 return -1;
70 else
71 return 1;
72 }
73
74 /* --sort pid */
75
76 static int64_t
77 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
78 {
79 return right->thread->tid - left->thread->tid;
80 }
81
82 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
83 size_t size, unsigned int width)
84 {
85 const char *comm = thread__comm_str(he->thread);
86
87 width = max(7U, width) - 6;
88 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
89 width, width, comm ?: "");
90 }
91
92 struct sort_entry sort_thread = {
93 .se_header = " Pid:Command",
94 .se_cmp = sort__thread_cmp,
95 .se_snprintf = hist_entry__thread_snprintf,
96 .se_width_idx = HISTC_THREAD,
97 };
98
99 /* --sort comm */
100
101 static int64_t
102 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
103 {
104 /* Compare the comm strings */
105 return strcmp(comm__str(right->comm), comm__str(left->comm));
106 }
107
108 static int64_t
109 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
110 {
111 /* Compare the comm strings */
112 return strcmp(comm__str(right->comm), comm__str(left->comm));
113 }
114
115 static int64_t
116 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
117 {
118 return strcmp(comm__str(right->comm), comm__str(left->comm));
119 }
120
121 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
122 size_t size, unsigned int width)
123 {
124 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
125 }
126
127 struct sort_entry sort_comm = {
128 .se_header = "Command",
129 .se_cmp = sort__comm_cmp,
130 .se_collapse = sort__comm_collapse,
131 .se_sort = sort__comm_sort,
132 .se_snprintf = hist_entry__comm_snprintf,
133 .se_width_idx = HISTC_COMM,
134 };
135
136 /* --sort dso */
137
138 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
139 {
140 struct dso *dso_l = map_l ? map_l->dso : NULL;
141 struct dso *dso_r = map_r ? map_r->dso : NULL;
142 const char *dso_name_l, *dso_name_r;
143
144 if (!dso_l || !dso_r)
145 return cmp_null(dso_r, dso_l);
146
147 if (verbose) {
148 dso_name_l = dso_l->long_name;
149 dso_name_r = dso_r->long_name;
150 } else {
151 dso_name_l = dso_l->short_name;
152 dso_name_r = dso_r->short_name;
153 }
154
155 return strcmp(dso_name_l, dso_name_r);
156 }
157
158 static int64_t
159 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
160 {
161 return _sort__dso_cmp(right->ms.map, left->ms.map);
162 }
163
164 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
165 size_t size, unsigned int width)
166 {
167 if (map && map->dso) {
168 const char *dso_name = !verbose ? map->dso->short_name :
169 map->dso->long_name;
170 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
171 }
172
173 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
174 }
175
176 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
177 size_t size, unsigned int width)
178 {
179 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
180 }
181
182 struct sort_entry sort_dso = {
183 .se_header = "Shared Object",
184 .se_cmp = sort__dso_cmp,
185 .se_snprintf = hist_entry__dso_snprintf,
186 .se_width_idx = HISTC_DSO,
187 };
188
189 /* --sort symbol */
190
191 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
192 {
193 return (int64_t)(right_ip - left_ip);
194 }
195
196 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
197 {
198 if (!sym_l || !sym_r)
199 return cmp_null(sym_l, sym_r);
200
201 if (sym_l == sym_r)
202 return 0;
203
204 if (sym_l->start != sym_r->start)
205 return (int64_t)(sym_r->start - sym_l->start);
206
207 return (int64_t)(sym_r->end - sym_l->end);
208 }
209
210 static int64_t
211 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
212 {
213 int64_t ret;
214
215 if (!left->ms.sym && !right->ms.sym)
216 return _sort__addr_cmp(left->ip, right->ip);
217
218 /*
219 * comparing symbol address alone is not enough since it's a
220 * relative address within a dso.
221 */
222 if (!sort__has_dso) {
223 ret = sort__dso_cmp(left, right);
224 if (ret != 0)
225 return ret;
226 }
227
228 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
229 }
230
231 static int64_t
232 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
233 {
234 if (!left->ms.sym || !right->ms.sym)
235 return cmp_null(left->ms.sym, right->ms.sym);
236
237 return strcmp(right->ms.sym->name, left->ms.sym->name);
238 }
239
240 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
241 u64 ip, char level, char *bf, size_t size,
242 unsigned int width)
243 {
244 size_t ret = 0;
245
246 if (verbose) {
247 char o = map ? dso__symtab_origin(map->dso) : '!';
248 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
249 BITS_PER_LONG / 4 + 2, ip, o);
250 }
251
252 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
253 if (sym && map) {
254 if (map->type == MAP__VARIABLE) {
255 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
256 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
257 ip - map->unmap_ip(map, sym->start));
258 } else {
259 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
260 width - ret,
261 sym->name);
262 }
263 } else {
264 size_t len = BITS_PER_LONG / 4;
265 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
266 len, ip);
267 }
268
269 return ret;
270 }
271
272 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
273 size_t size, unsigned int width)
274 {
275 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
276 he->level, bf, size, width);
277 }
278
279 struct sort_entry sort_sym = {
280 .se_header = "Symbol",
281 .se_cmp = sort__sym_cmp,
282 .se_sort = sort__sym_sort,
283 .se_snprintf = hist_entry__sym_snprintf,
284 .se_width_idx = HISTC_SYMBOL,
285 };
286
287 /* --sort srcline */
288
289 static char *hist_entry__get_srcline(struct hist_entry *he)
290 {
291 struct map *map = he->ms.map;
292
293 if (!map)
294 return SRCLINE_UNKNOWN;
295
296 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
297 he->ms.sym, true);
298 }
299
300 static int64_t
301 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
302 {
303 if (!left->srcline)
304 left->srcline = hist_entry__get_srcline(left);
305 if (!right->srcline)
306 right->srcline = hist_entry__get_srcline(right);
307
308 return strcmp(right->srcline, left->srcline);
309 }
310
311 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
312 size_t size, unsigned int width)
313 {
314 if (!he->srcline)
315 he->srcline = hist_entry__get_srcline(he);
316
317 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
318 }
319
320 struct sort_entry sort_srcline = {
321 .se_header = "Source:Line",
322 .se_cmp = sort__srcline_cmp,
323 .se_snprintf = hist_entry__srcline_snprintf,
324 .se_width_idx = HISTC_SRCLINE,
325 };
326
327 /* --sort srcfile */
328
329 static char no_srcfile[1];
330
331 static char *hist_entry__get_srcfile(struct hist_entry *e)
332 {
333 char *sf, *p;
334 struct map *map = e->ms.map;
335
336 if (!map)
337 return no_srcfile;
338
339 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
340 e->ms.sym, false, true);
341 if (!strcmp(sf, SRCLINE_UNKNOWN))
342 return no_srcfile;
343 p = strchr(sf, ':');
344 if (p && *sf) {
345 *p = 0;
346 return sf;
347 }
348 free(sf);
349 return no_srcfile;
350 }
351
352 static int64_t
353 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
354 {
355 if (!left->srcfile)
356 left->srcfile = hist_entry__get_srcfile(left);
357 if (!right->srcfile)
358 right->srcfile = hist_entry__get_srcfile(right);
359
360 return strcmp(right->srcfile, left->srcfile);
361 }
362
363 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
364 size_t size, unsigned int width)
365 {
366 if (!he->srcfile)
367 he->srcfile = hist_entry__get_srcfile(he);
368
369 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
370 }
371
372 struct sort_entry sort_srcfile = {
373 .se_header = "Source File",
374 .se_cmp = sort__srcfile_cmp,
375 .se_snprintf = hist_entry__srcfile_snprintf,
376 .se_width_idx = HISTC_SRCFILE,
377 };
378
379 /* --sort parent */
380
381 static int64_t
382 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
383 {
384 struct symbol *sym_l = left->parent;
385 struct symbol *sym_r = right->parent;
386
387 if (!sym_l || !sym_r)
388 return cmp_null(sym_l, sym_r);
389
390 return strcmp(sym_r->name, sym_l->name);
391 }
392
393 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
394 size_t size, unsigned int width)
395 {
396 return repsep_snprintf(bf, size, "%-*.*s", width, width,
397 he->parent ? he->parent->name : "[other]");
398 }
399
400 struct sort_entry sort_parent = {
401 .se_header = "Parent symbol",
402 .se_cmp = sort__parent_cmp,
403 .se_snprintf = hist_entry__parent_snprintf,
404 .se_width_idx = HISTC_PARENT,
405 };
406
407 /* --sort cpu */
408
409 static int64_t
410 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
411 {
412 return right->cpu - left->cpu;
413 }
414
415 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
416 size_t size, unsigned int width)
417 {
418 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
419 }
420
421 struct sort_entry sort_cpu = {
422 .se_header = "CPU",
423 .se_cmp = sort__cpu_cmp,
424 .se_snprintf = hist_entry__cpu_snprintf,
425 .se_width_idx = HISTC_CPU,
426 };
427
428 /* --sort socket */
429
430 static int64_t
431 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
432 {
433 return right->socket - left->socket;
434 }
435
436 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
437 size_t size, unsigned int width)
438 {
439 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
440 }
441
442 struct sort_entry sort_socket = {
443 .se_header = "Socket",
444 .se_cmp = sort__socket_cmp,
445 .se_snprintf = hist_entry__socket_snprintf,
446 .se_width_idx = HISTC_SOCKET,
447 };
448
449 /* --sort trace */
450
451 static char *get_trace_output(struct hist_entry *he)
452 {
453 struct trace_seq seq;
454 struct perf_evsel *evsel;
455 struct pevent_record rec = {
456 .data = he->raw_data,
457 .size = he->raw_size,
458 };
459
460 evsel = hists_to_evsel(he->hists);
461
462 trace_seq_init(&seq);
463 if (symbol_conf.raw_trace) {
464 pevent_print_fields(&seq, he->raw_data, he->raw_size,
465 evsel->tp_format);
466 } else {
467 pevent_event_info(&seq, evsel->tp_format, &rec);
468 }
469 return seq.buffer;
470 }
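/*
 * Note: trace_seq_init() heap-allocates seq.buffer, so the string returned
 * here is owned by the caller. The users below cache it in he->trace_output
 * instead of regenerating it for every comparison or print.
 */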
471
472 static int64_t
473 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
474 {
475 struct perf_evsel *evsel;
476
477 evsel = hists_to_evsel(left->hists);
478 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
479 return 0;
480
481 if (left->trace_output == NULL)
482 left->trace_output = get_trace_output(left);
483 if (right->trace_output == NULL)
484 right->trace_output = get_trace_output(right);
485
486 return strcmp(right->trace_output, left->trace_output);
487 }
488
489 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
490 size_t size, unsigned int width)
491 {
492 struct perf_evsel *evsel;
493
494 evsel = hists_to_evsel(he->hists);
495 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
496 return scnprintf(bf, size, "%-.*s", width, "N/A");
497
498 if (he->trace_output == NULL)
499 he->trace_output = get_trace_output(he);
500 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
501 }
502
503 struct sort_entry sort_trace = {
504 .se_header = "Trace output",
505 .se_cmp = sort__trace_cmp,
506 .se_snprintf = hist_entry__trace_snprintf,
507 .se_width_idx = HISTC_TRACE,
508 };
509
510 /* sort keys for branch stacks */
511
512 static int64_t
513 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
514 {
515 if (!left->branch_info || !right->branch_info)
516 return cmp_null(left->branch_info, right->branch_info);
517
518 return _sort__dso_cmp(left->branch_info->from.map,
519 right->branch_info->from.map);
520 }
521
522 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
523 size_t size, unsigned int width)
524 {
525 if (he->branch_info)
526 return _hist_entry__dso_snprintf(he->branch_info->from.map,
527 bf, size, width);
528 else
529 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
530 }
531
532 static int64_t
533 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
534 {
535 if (!left->branch_info || !right->branch_info)
536 return cmp_null(left->branch_info, right->branch_info);
537
538 return _sort__dso_cmp(left->branch_info->to.map,
539 right->branch_info->to.map);
540 }
541
542 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
543 size_t size, unsigned int width)
544 {
545 if (he->branch_info)
546 return _hist_entry__dso_snprintf(he->branch_info->to.map,
547 bf, size, width);
548 else
549 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
550 }
551
552 static int64_t
553 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
554 {
555 struct addr_map_symbol *from_l;
556 struct addr_map_symbol *from_r;
557
558 if (!left->branch_info || !right->branch_info)
559 return cmp_null(left->branch_info, right->branch_info);
560
561 from_l = &left->branch_info->from;
562 from_r = &right->branch_info->from;
563
564 if (!from_l->sym && !from_r->sym)
565 return _sort__addr_cmp(from_l->addr, from_r->addr);
566
567 return _sort__sym_cmp(from_l->sym, from_r->sym);
568 }
569
570 static int64_t
571 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
572 {
573 struct addr_map_symbol *to_l, *to_r;
574
575 if (!left->branch_info || !right->branch_info)
576 return cmp_null(left->branch_info, right->branch_info);
577
578 to_l = &left->branch_info->to;
579 to_r = &right->branch_info->to;
580
581 if (!to_l->sym && !to_r->sym)
582 return _sort__addr_cmp(to_l->addr, to_r->addr);
583
584 return _sort__sym_cmp(to_l->sym, to_r->sym);
585 }
586
587 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
588 size_t size, unsigned int width)
589 {
590 if (he->branch_info) {
591 struct addr_map_symbol *from = &he->branch_info->from;
592
593 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
594 he->level, bf, size, width);
595 }
596
597 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
598 }
599
600 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
601 size_t size, unsigned int width)
602 {
603 if (he->branch_info) {
604 struct addr_map_symbol *to = &he->branch_info->to;
605
606 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
607 he->level, bf, size, width);
608 }
609
610 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
611 }
612
613 struct sort_entry sort_dso_from = {
614 .se_header = "Source Shared Object",
615 .se_cmp = sort__dso_from_cmp,
616 .se_snprintf = hist_entry__dso_from_snprintf,
617 .se_width_idx = HISTC_DSO_FROM,
618 };
619
620 struct sort_entry sort_dso_to = {
621 .se_header = "Target Shared Object",
622 .se_cmp = sort__dso_to_cmp,
623 .se_snprintf = hist_entry__dso_to_snprintf,
624 .se_width_idx = HISTC_DSO_TO,
625 };
626
627 struct sort_entry sort_sym_from = {
628 .se_header = "Source Symbol",
629 .se_cmp = sort__sym_from_cmp,
630 .se_snprintf = hist_entry__sym_from_snprintf,
631 .se_width_idx = HISTC_SYMBOL_FROM,
632 };
633
634 struct sort_entry sort_sym_to = {
635 .se_header = "Target Symbol",
636 .se_cmp = sort__sym_to_cmp,
637 .se_snprintf = hist_entry__sym_to_snprintf,
638 .se_width_idx = HISTC_SYMBOL_TO,
639 };
640
641 static int64_t
642 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
643 {
644 unsigned char mp, p;
645
646 if (!left->branch_info || !right->branch_info)
647 return cmp_null(left->branch_info, right->branch_info);
648
649 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
650 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
651 return mp || p;
652 }
653
654 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
655 size_t size, unsigned int width)
{
656 const char *out = "N/A";
657
658 if (he->branch_info) {
659 if (he->branch_info->flags.predicted)
660 out = "N";
661 else if (he->branch_info->flags.mispred)
662 out = "Y";
663 }
664
665 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
666 }
667
668 static int64_t
669 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
670 {
671 return left->branch_info->flags.cycles -
672 right->branch_info->flags.cycles;
673 }
674
675 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
676 size_t size, unsigned int width)
677 {
678 if (he->branch_info->flags.cycles == 0)
679 return repsep_snprintf(bf, size, "%-*s", width, "-");
680 return repsep_snprintf(bf, size, "%-*hd", width,
681 he->branch_info->flags.cycles);
682 }
683
684 struct sort_entry sort_cycles = {
685 .se_header = "Basic Block Cycles",
686 .se_cmp = sort__cycles_cmp,
687 .se_snprintf = hist_entry__cycles_snprintf,
688 .se_width_idx = HISTC_CYCLES,
689 };
690
691 /* --sort daddr_sym */
692 static int64_t
693 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
694 {
695 uint64_t l = 0, r = 0;
696
697 if (left->mem_info)
698 l = left->mem_info->daddr.addr;
699 if (right->mem_info)
700 r = right->mem_info->daddr.addr;
701
702 return (int64_t)(r - l);
703 }
704
705 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
706 size_t size, unsigned int width)
707 {
708 uint64_t addr = 0;
709 struct map *map = NULL;
710 struct symbol *sym = NULL;
711
712 if (he->mem_info) {
713 addr = he->mem_info->daddr.addr;
714 map = he->mem_info->daddr.map;
715 sym = he->mem_info->daddr.sym;
716 }
717 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
718 width);
719 }
720
721 static int64_t
722 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
723 {
724 uint64_t l = 0, r = 0;
725
726 if (left->mem_info)
727 l = left->mem_info->iaddr.addr;
728 if (right->mem_info)
729 r = right->mem_info->iaddr.addr;
730
731 return (int64_t)(r - l);
732 }
733
734 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
735 size_t size, unsigned int width)
736 {
737 uint64_t addr = 0;
738 struct map *map = NULL;
739 struct symbol *sym = NULL;
740
741 if (he->mem_info) {
742 addr = he->mem_info->iaddr.addr;
743 map = he->mem_info->iaddr.map;
744 sym = he->mem_info->iaddr.sym;
745 }
746 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
747 width);
748 }
749
750 static int64_t
751 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
752 {
753 struct map *map_l = NULL;
754 struct map *map_r = NULL;
755
756 if (left->mem_info)
757 map_l = left->mem_info->daddr.map;
758 if (right->mem_info)
759 map_r = right->mem_info->daddr.map;
760
761 return _sort__dso_cmp(map_l, map_r);
762 }
763
764 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
765 size_t size, unsigned int width)
766 {
767 struct map *map = NULL;
768
769 if (he->mem_info)
770 map = he->mem_info->daddr.map;
771
772 return _hist_entry__dso_snprintf(map, bf, size, width);
773 }
774
775 static int64_t
776 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
777 {
778 union perf_mem_data_src data_src_l;
779 union perf_mem_data_src data_src_r;
780
781 if (left->mem_info)
782 data_src_l = left->mem_info->data_src;
783 else
784 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
785
786 if (right->mem_info)
787 data_src_r = right->mem_info->data_src;
788 else
789 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
790
791 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
792 }
793
794 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
795 size_t size, unsigned int width)
796 {
797 const char *out;
798 u64 mask = PERF_MEM_LOCK_NA;
799
800 if (he->mem_info)
801 mask = he->mem_info->data_src.mem_lock;
802
803 if (mask & PERF_MEM_LOCK_NA)
804 out = "N/A";
805 else if (mask & PERF_MEM_LOCK_LOCKED)
806 out = "Yes";
807 else
808 out = "No";
809
810 return repsep_snprintf(bf, size, "%.*s", width, out);
811 }
812
813 static int64_t
814 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
815 {
816 union perf_mem_data_src data_src_l;
817 union perf_mem_data_src data_src_r;
818
819 if (left->mem_info)
820 data_src_l = left->mem_info->data_src;
821 else
822 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
823
824 if (right->mem_info)
825 data_src_r = right->mem_info->data_src;
826 else
827 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
828
829 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
830 }
831
832 static const char * const tlb_access[] = {
833 "N/A",
834 "HIT",
835 "MISS",
836 "L1",
837 "L2",
838 "Walker",
839 "Fault",
840 };
841 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
842
843 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
844 size_t size, unsigned int width)
845 {
846 char out[64];
847 size_t sz = sizeof(out) - 1; /* -1 for null termination */
848 size_t l = 0, i;
849 u64 m = PERF_MEM_TLB_NA;
850 u64 hit, miss;
851
852 out[0] = '\0';
853
854 if (he->mem_info)
855 m = he->mem_info->data_src.mem_dtlb;
856
857 hit = m & PERF_MEM_TLB_HIT;
858 miss = m & PERF_MEM_TLB_MISS;
859
860 /* already taken care of */
861 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
862
863 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
864 if (!(m & 0x1))
865 continue;
866 if (l) {
867 strcat(out, " or ");
868 l += 4;
869 }
870 strncat(out, tlb_access[i], sz - l);
871 l += strlen(tlb_access[i]);
872 }
873 if (*out == '\0')
874 strcpy(out, "N/A");
875 if (hit)
876 strncat(out, " hit", sz - l);
877 if (miss)
878 strncat(out, " miss", sz - l);
879
880 return repsep_snprintf(bf, size, "%-*s", width, out);
881 }
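/*
 * Example output: a sample with PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT prints
 * as "L1 hit"; several levels are joined with " or ", e.g. "L1 or L2 miss".
 */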
882
883 static int64_t
884 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
885 {
886 union perf_mem_data_src data_src_l;
887 union perf_mem_data_src data_src_r;
888
889 if (left->mem_info)
890 data_src_l = left->mem_info->data_src;
891 else
892 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
893
894 if (right->mem_info)
895 data_src_r = right->mem_info->data_src;
896 else
897 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
898
899 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
900 }
901
902 static const char * const mem_lvl[] = {
903 "N/A",
904 "HIT",
905 "MISS",
906 "L1",
907 "LFB",
908 "L2",
909 "L3",
910 "Local RAM",
911 "Remote RAM (1 hop)",
912 "Remote RAM (2 hops)",
913 "Remote Cache (1 hop)",
914 "Remote Cache (2 hops)",
915 "I/O",
916 "Uncached",
917 };
918 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
919
920 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
921 size_t size, unsigned int width)
922 {
923 char out[64];
924 size_t sz = sizeof(out) - 1; /* -1 for null termination */
925 size_t i, l = 0;
926 u64 m = PERF_MEM_LVL_NA;
927 u64 hit, miss;
928
929 if (he->mem_info)
930 m = he->mem_info->data_src.mem_lvl;
931
932 out[0] = '\0';
933
934 hit = m & PERF_MEM_LVL_HIT;
935 miss = m & PERF_MEM_LVL_MISS;
936
937 /* already taken care of */
938 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
939
940 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
941 if (!(m & 0x1))
942 continue;
943 if (l) {
944 strcat(out, " or ");
945 l += 4;
946 }
947 strncat(out, mem_lvl[i], sz - l);
948 l += strlen(mem_lvl[i]);
949 }
950 if (*out == '\0')
951 strcpy(out, "N/A");
952 if (hit)
953 strncat(out, " hit", sz - l);
954 if (miss)
955 strncat(out, " miss", sz - l);
956
957 return repsep_snprintf(bf, size, "%-*s", width, out);
958 }
959
960 static int64_t
961 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
962 {
963 union perf_mem_data_src data_src_l;
964 union perf_mem_data_src data_src_r;
965
966 if (left->mem_info)
967 data_src_l = left->mem_info->data_src;
968 else
969 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
970
971 if (right->mem_info)
972 data_src_r = right->mem_info->data_src;
973 else
974 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
975
976 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
977 }
978
979 static const char * const snoop_access[] = {
980 "N/A",
981 "None",
982 "Miss",
983 "Hit",
984 "HitM",
985 };
986 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
987
988 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
989 size_t size, unsigned int width)
990 {
991 char out[64];
992 size_t sz = sizeof(out) - 1; /* -1 for null termination */
993 size_t i, l = 0;
994 u64 m = PERF_MEM_SNOOP_NA;
995
996 out[0] = '\0';
997
998 if (he->mem_info)
999 m = he->mem_info->data_src.mem_snoop;
1000
1001 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
1002 if (!(m & 0x1))
1003 continue;
1004 if (l) {
1005 strcat(out, " or ");
1006 l += 4;
1007 }
1008 strncat(out, snoop_access[i], sz - l);
1009 l += strlen(snoop_access[i]);
1010 }
1011
1012 if (*out == '\0')
1013 strcpy(out, "N/A");
1014
1015 return repsep_snprintf(bf, size, "%-*s", width, out);
1016 }
1017
1018 static inline u64 cl_address(u64 address)
1019 {
1020 /* return the cacheline of the address */
1021 return (address & ~(cacheline_size - 1));
1022 }
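/*
 * Example: assuming cacheline_size is 64, cl_address(0x1234) clears the low
 * 6 bits and returns 0x1200, so any two accesses within the same 64-byte
 * line map to the same key for the dcacheline sort.
 */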
1023
1024 static int64_t
1025 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1026 {
1027 u64 l, r;
1028 struct map *l_map, *r_map;
1029
1030 if (!left->mem_info) return -1;
1031 if (!right->mem_info) return 1;
1032
1033 /* group event types together */
1034 if (left->cpumode > right->cpumode) return -1;
1035 if (left->cpumode < right->cpumode) return 1;
1036
1037 l_map = left->mem_info->daddr.map;
1038 r_map = right->mem_info->daddr.map;
1039
1040 /* if both are NULL, jump to sort on al_addr instead */
1041 if (!l_map && !r_map)
1042 goto addr;
1043
1044 if (!l_map) return -1;
1045 if (!r_map) return 1;
1046
1047 if (l_map->maj > r_map->maj) return -1;
1048 if (l_map->maj < r_map->maj) return 1;
1049
1050 if (l_map->min > r_map->min) return -1;
1051 if (l_map->min < r_map->min) return 1;
1052
1053 if (l_map->ino > r_map->ino) return -1;
1054 if (l_map->ino < r_map->ino) return 1;
1055
1056 if (l_map->ino_generation > r_map->ino_generation) return -1;
1057 if (l_map->ino_generation < r_map->ino_generation) return 1;
1058
1059 /*
1060 * Addresses with no major/minor numbers are assumed to be
1061 * anonymous in userspace. Sort those on pid then address.
1062 *
1063 * The kernel and non-zero major/minor mapped areas are
1064 * assumed to be unity mapped. Sort those on address.
1065 */
1066
1067 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1068 (!(l_map->flags & MAP_SHARED)) &&
1069 !l_map->maj && !l_map->min && !l_map->ino &&
1070 !l_map->ino_generation) {
1071 /* userspace anonymous */
1072
1073 if (left->thread->pid_ > right->thread->pid_) return -1;
1074 if (left->thread->pid_ < right->thread->pid_) return 1;
1075 }
1076
1077 addr:
1078 /* al_addr does all the right addr - start + offset calculations */
1079 l = cl_address(left->mem_info->daddr.al_addr);
1080 r = cl_address(right->mem_info->daddr.al_addr);
1081
1082 if (l > r) return -1;
1083 if (l < r) return 1;
1084
1085 return 0;
1086 }
1087
1088 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1089 size_t size, unsigned int width)
1090 {
1091
1092 uint64_t addr = 0;
1093 struct map *map = NULL;
1094 struct symbol *sym = NULL;
1095 char level = he->level;
1096
1097 if (he->mem_info) {
1098 addr = cl_address(he->mem_info->daddr.al_addr);
1099 map = he->mem_info->daddr.map;
1100 sym = he->mem_info->daddr.sym;
1101
1102 /* print [s] for shared data mmaps */
1103 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1104 map && (map->type == MAP__VARIABLE) &&
1105 (map->flags & MAP_SHARED) &&
1106 (map->maj || map->min || map->ino ||
1107 map->ino_generation))
1108 level = 's';
1109 else if (!map)
1110 level = 'X';
1111 }
1112 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1113 width);
1114 }
1115
1116 struct sort_entry sort_mispredict = {
1117 .se_header = "Branch Mispredicted",
1118 .se_cmp = sort__mispredict_cmp,
1119 .se_snprintf = hist_entry__mispredict_snprintf,
1120 .se_width_idx = HISTC_MISPREDICT,
1121 };
1122
1123 static u64 he_weight(struct hist_entry *he)
1124 {
1125 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1126 }
1127
1128 static int64_t
1129 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1130 {
1131 return he_weight(left) - he_weight(right);
1132 }
1133
1134 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1135 size_t size, unsigned int width)
1136 {
1137 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1138 }
1139
1140 struct sort_entry sort_local_weight = {
1141 .se_header = "Local Weight",
1142 .se_cmp = sort__local_weight_cmp,
1143 .se_snprintf = hist_entry__local_weight_snprintf,
1144 .se_width_idx = HISTC_LOCAL_WEIGHT,
1145 };
1146
1147 static int64_t
1148 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1149 {
1150 return left->stat.weight - right->stat.weight;
1151 }
1152
1153 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1154 size_t size, unsigned int width)
1155 {
1156 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1157 }
1158
1159 struct sort_entry sort_global_weight = {
1160 .se_header = "Weight",
1161 .se_cmp = sort__global_weight_cmp,
1162 .se_snprintf = hist_entry__global_weight_snprintf,
1163 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1164 };
1165
1166 struct sort_entry sort_mem_daddr_sym = {
1167 .se_header = "Data Symbol",
1168 .se_cmp = sort__daddr_cmp,
1169 .se_snprintf = hist_entry__daddr_snprintf,
1170 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1171 };
1172
1173 struct sort_entry sort_mem_iaddr_sym = {
1174 .se_header = "Code Symbol",
1175 .se_cmp = sort__iaddr_cmp,
1176 .se_snprintf = hist_entry__iaddr_snprintf,
1177 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1178 };
1179
1180 struct sort_entry sort_mem_daddr_dso = {
1181 .se_header = "Data Object",
1182 .se_cmp = sort__dso_daddr_cmp,
1183 .se_snprintf = hist_entry__dso_daddr_snprintf,
1184 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1185 };
1186
1187 struct sort_entry sort_mem_locked = {
1188 .se_header = "Locked",
1189 .se_cmp = sort__locked_cmp,
1190 .se_snprintf = hist_entry__locked_snprintf,
1191 .se_width_idx = HISTC_MEM_LOCKED,
1192 };
1193
1194 struct sort_entry sort_mem_tlb = {
1195 .se_header = "TLB access",
1196 .se_cmp = sort__tlb_cmp,
1197 .se_snprintf = hist_entry__tlb_snprintf,
1198 .se_width_idx = HISTC_MEM_TLB,
1199 };
1200
1201 struct sort_entry sort_mem_lvl = {
1202 .se_header = "Memory access",
1203 .se_cmp = sort__lvl_cmp,
1204 .se_snprintf = hist_entry__lvl_snprintf,
1205 .se_width_idx = HISTC_MEM_LVL,
1206 };
1207
1208 struct sort_entry sort_mem_snoop = {
1209 .se_header = "Snoop",
1210 .se_cmp = sort__snoop_cmp,
1211 .se_snprintf = hist_entry__snoop_snprintf,
1212 .se_width_idx = HISTC_MEM_SNOOP,
1213 };
1214
1215 struct sort_entry sort_mem_dcacheline = {
1216 .se_header = "Data Cacheline",
1217 .se_cmp = sort__dcacheline_cmp,
1218 .se_snprintf = hist_entry__dcacheline_snprintf,
1219 .se_width_idx = HISTC_MEM_DCACHELINE,
1220 };
1221
1222 static int64_t
1223 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1224 {
1225 if (!left->branch_info || !right->branch_info)
1226 return cmp_null(left->branch_info, right->branch_info);
1227
1228 return left->branch_info->flags.abort !=
1229 right->branch_info->flags.abort;
1230 }
1231
1232 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1233 size_t size, unsigned int width)
1234 {
1235 const char *out = "N/A";
1236
1237 if (he->branch_info) {
1238 if (he->branch_info->flags.abort)
1239 out = "A";
1240 else
1241 out = ".";
1242 }
1243
1244 return repsep_snprintf(bf, size, "%-*s", width, out);
1245 }
1246
1247 struct sort_entry sort_abort = {
1248 .se_header = "Transaction abort",
1249 .se_cmp = sort__abort_cmp,
1250 .se_snprintf = hist_entry__abort_snprintf,
1251 .se_width_idx = HISTC_ABORT,
1252 };
1253
1254 static int64_t
1255 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1256 {
1257 if (!left->branch_info || !right->branch_info)
1258 return cmp_null(left->branch_info, right->branch_info);
1259
1260 return left->branch_info->flags.in_tx !=
1261 right->branch_info->flags.in_tx;
1262 }
1263
1264 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1265 size_t size, unsigned int width)
1266 {
1267 const char *out = "N/A";
1268
1269 if (he->branch_info) {
1270 if (he->branch_info->flags.in_tx)
1271 out = "T";
1272 else
1273 out = ".";
1274 }
1275
1276 return repsep_snprintf(bf, size, "%-*s", width, out);
1277 }
1278
1279 struct sort_entry sort_in_tx = {
1280 .se_header = "Branch in transaction",
1281 .se_cmp = sort__in_tx_cmp,
1282 .se_snprintf = hist_entry__in_tx_snprintf,
1283 .se_width_idx = HISTC_IN_TX,
1284 };
1285
1286 static int64_t
1287 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1288 {
1289 return left->transaction - right->transaction;
1290 }
1291
1292 static inline char *add_str(char *p, const char *str)
1293 {
1294 strcpy(p, str);
1295 return p + strlen(str);
1296 }
1297
1298 static struct txbit {
1299 unsigned flag;
1300 const char *name;
1301 int skip_for_len;
1302 } txbits[] = {
1303 { PERF_TXN_ELISION, "EL ", 0 },
1304 { PERF_TXN_TRANSACTION, "TX ", 1 },
1305 { PERF_TXN_SYNC, "SYNC ", 1 },
1306 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1307 { PERF_TXN_RETRY, "RETRY ", 0 },
1308 { PERF_TXN_CONFLICT, "CON ", 0 },
1309 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1310 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1311 { 0, NULL, 0 }
1312 };
1313
1314 int hist_entry__transaction_len(void)
1315 {
1316 int i;
1317 int len = 0;
1318
1319 for (i = 0; txbits[i].name; i++) {
1320 if (!txbits[i].skip_for_len)
1321 len += strlen(txbits[i].name);
1322 }
1323 len += 4; /* :XX<space> */
1324 return len;
1325 }
1326
1327 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1328 size_t size, unsigned int width)
1329 {
1330 u64 t = he->transaction;
1331 char buf[128];
1332 char *p = buf;
1333 int i;
1334
1335 buf[0] = 0;
1336 for (i = 0; txbits[i].name; i++)
1337 if (txbits[i].flag & t)
1338 p = add_str(p, txbits[i].name);
1339 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1340 p = add_str(p, "NEITHER ");
1341 if (t & PERF_TXN_ABORT_MASK) {
1342 sprintf(p, ":%" PRIx64,
1343 (t & PERF_TXN_ABORT_MASK) >>
1344 PERF_TXN_ABORT_SHIFT);
1345 p += strlen(p);
1346 }
1347
1348 return repsep_snprintf(bf, size, "%-*s", width, buf);
1349 }
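/*
 * Example: a transaction word with PERF_TXN_TRANSACTION | PERF_TXN_SYNC set
 * renders as "TX SYNC "; if abort-code bits are present they are appended
 * as ":<hex code>".
 */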
1350
1351 struct sort_entry sort_transaction = {
1352 .se_header = "Transaction ",
1353 .se_cmp = sort__transaction_cmp,
1354 .se_snprintf = hist_entry__transaction_snprintf,
1355 .se_width_idx = HISTC_TRANSACTION,
1356 };
1357
1358 struct sort_dimension {
1359 const char *name;
1360 struct sort_entry *entry;
1361 int taken;
1362 };
1363
1364 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1365
1366 static struct sort_dimension common_sort_dimensions[] = {
1367 DIM(SORT_PID, "pid", sort_thread),
1368 DIM(SORT_COMM, "comm", sort_comm),
1369 DIM(SORT_DSO, "dso", sort_dso),
1370 DIM(SORT_SYM, "symbol", sort_sym),
1371 DIM(SORT_PARENT, "parent", sort_parent),
1372 DIM(SORT_CPU, "cpu", sort_cpu),
1373 DIM(SORT_SOCKET, "socket", sort_socket),
1374 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1375 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1376 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1377 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1378 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1379 DIM(SORT_TRACE, "trace", sort_trace),
1380 };
1381
1382 #undef DIM
1383
1384 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1385
1386 static struct sort_dimension bstack_sort_dimensions[] = {
1387 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1388 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1389 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1390 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1391 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1392 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1393 DIM(SORT_ABORT, "abort", sort_abort),
1394 DIM(SORT_CYCLES, "cycles", sort_cycles),
1395 };
1396
1397 #undef DIM
1398
1399 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1400
1401 static struct sort_dimension memory_sort_dimensions[] = {
1402 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1403 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1404 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1405 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1406 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1407 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1408 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1409 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1410 };
1411
1412 #undef DIM
1413
1414 struct hpp_dimension {
1415 const char *name;
1416 struct perf_hpp_fmt *fmt;
1417 int taken;
1418 };
1419
1420 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1421
1422 static struct hpp_dimension hpp_sort_dimensions[] = {
1423 DIM(PERF_HPP__OVERHEAD, "overhead"),
1424 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1425 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1426 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1427 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1428 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1429 DIM(PERF_HPP__SAMPLES, "sample"),
1430 DIM(PERF_HPP__PERIOD, "period"),
1431 };
1432
1433 #undef DIM
1434
1435 struct hpp_sort_entry {
1436 struct perf_hpp_fmt hpp;
1437 struct sort_entry *se;
1438 };
1439
1440 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1441 {
1442 struct hpp_sort_entry *hse;
1443
1444 if (!perf_hpp__is_sort_entry(fmt))
1445 return;
1446
1447 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1448 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1449 }
1450
1451 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1452 struct perf_evsel *evsel)
1453 {
1454 struct hpp_sort_entry *hse;
1455 size_t len = fmt->user_len;
1456
1457 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1458
1459 if (!len)
1460 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1461
1462 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1463 }
1464
1465 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1466 struct perf_hpp *hpp __maybe_unused,
1467 struct perf_evsel *evsel)
1468 {
1469 struct hpp_sort_entry *hse;
1470 size_t len = fmt->user_len;
1471
1472 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1473
1474 if (!len)
1475 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1476
1477 return len;
1478 }
1479
1480 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1481 struct hist_entry *he)
1482 {
1483 struct hpp_sort_entry *hse;
1484 size_t len = fmt->user_len;
1485
1486 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1487
1488 if (!len)
1489 len = hists__col_len(he->hists, hse->se->se_width_idx);
1490
1491 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1492 }
1493
1494 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1495 struct hist_entry *a, struct hist_entry *b)
1496 {
1497 struct hpp_sort_entry *hse;
1498
1499 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1500 return hse->se->se_cmp(a, b);
1501 }
1502
1503 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1504 struct hist_entry *a, struct hist_entry *b)
1505 {
1506 struct hpp_sort_entry *hse;
1507 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1508
1509 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1510 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1511 return collapse_fn(a, b);
1512 }
1513
1514 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1515 struct hist_entry *a, struct hist_entry *b)
1516 {
1517 struct hpp_sort_entry *hse;
1518 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1519
1520 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1521 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1522 return sort_fn(a, b);
1523 }
1524
1525 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1526 {
1527 return format->header == __sort__hpp_header;
1528 }
1529
1530 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1531 {
1532 struct hpp_sort_entry *hse_a;
1533 struct hpp_sort_entry *hse_b;
1534
1535 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1536 return false;
1537
1538 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1539 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1540
1541 return hse_a->se == hse_b->se;
1542 }
1543
1544 static void hse_free(struct perf_hpp_fmt *fmt)
1545 {
1546 struct hpp_sort_entry *hse;
1547
1548 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1549 free(hse);
1550 }
1551
1552 static struct hpp_sort_entry *
1553 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1554 {
1555 struct hpp_sort_entry *hse;
1556
1557 hse = malloc(sizeof(*hse));
1558 if (hse == NULL) {
1559 pr_err("Memory allocation failed\n");
1560 return NULL;
1561 }
1562
1563 hse->se = sd->entry;
1564 hse->hpp.name = sd->entry->se_header;
1565 hse->hpp.header = __sort__hpp_header;
1566 hse->hpp.width = __sort__hpp_width;
1567 hse->hpp.entry = __sort__hpp_entry;
1568 hse->hpp.color = NULL;
1569
1570 hse->hpp.cmp = __sort__hpp_cmp;
1571 hse->hpp.collapse = __sort__hpp_collapse;
1572 hse->hpp.sort = __sort__hpp_sort;
1573 hse->hpp.equal = __sort__hpp_equal;
1574 hse->hpp.free = hse_free;
1575
1576 INIT_LIST_HEAD(&hse->hpp.list);
1577 INIT_LIST_HEAD(&hse->hpp.sort_list);
1578 hse->hpp.elide = false;
1579 hse->hpp.len = 0;
1580 hse->hpp.user_len = 0;
1581
1582 return hse;
1583 }
1584
1585 static void hpp_free(struct perf_hpp_fmt *fmt)
1586 {
1587 free(fmt);
1588 }
1589
1590 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1591 {
1592 struct perf_hpp_fmt *fmt;
1593
1594 fmt = memdup(hd->fmt, sizeof(*fmt));
1595 if (fmt) {
1596 INIT_LIST_HEAD(&fmt->list);
1597 INIT_LIST_HEAD(&fmt->sort_list);
1598 fmt->free = hpp_free;
1599 }
1600
1601 return fmt;
1602 }
1603
1604 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1605 {
1606 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1607
1608 if (hse == NULL)
1609 return -1;
1610
1611 perf_hpp__register_sort_field(&hse->hpp);
1612 return 0;
1613 }
1614
1615 static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
1616 struct sort_dimension *sd)
1617 {
1618 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1619
1620 if (hse == NULL)
1621 return -1;
1622
1623 perf_hpp_list__column_register(list, &hse->hpp);
1624 return 0;
1625 }
1626
1627 struct hpp_dynamic_entry {
1628 struct perf_hpp_fmt hpp;
1629 struct perf_evsel *evsel;
1630 struct format_field *field;
1631 unsigned dynamic_len;
1632 bool raw_trace;
1633 };
1634
1635 static int hde_width(struct hpp_dynamic_entry *hde)
1636 {
1637 if (!hde->hpp.len) {
1638 int len = hde->dynamic_len;
1639 int namelen = strlen(hde->field->name);
1640 int fieldlen = hde->field->size;
1641
1642 if (namelen > len)
1643 len = namelen;
1644
1645 if (!(hde->field->flags & FIELD_IS_STRING)) {
1646 /* length for printing hex numbers */
1647 fieldlen = hde->field->size * 2 + 2;
1648 }
1649 if (fieldlen > len)
1650 len = fieldlen;
1651
1652 hde->hpp.len = len;
1653 }
1654 return hde->hpp.len;
1655 }
1656
1657 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1658 struct hist_entry *he)
1659 {
1660 char *str, *pos;
1661 struct format_field *field = hde->field;
1662 size_t namelen;
1663 bool last = false;
1664
1665 if (hde->raw_trace)
1666 return;
1667
1668 /* parse pretty print result and update max length */
1669 if (!he->trace_output)
1670 he->trace_output = get_trace_output(he);
1671
1672 namelen = strlen(field->name);
1673 str = he->trace_output;
1674
1675 while (str) {
1676 pos = strchr(str, ' ');
1677 if (pos == NULL) {
1678 last = true;
1679 pos = str + strlen(str);
1680 }
1681
1682 if (!strncmp(str, field->name, namelen)) {
1683 size_t len;
1684
1685 str += namelen + 1;
1686 len = pos - str;
1687
1688 if (len > hde->dynamic_len)
1689 hde->dynamic_len = len;
1690 break;
1691 }
1692
1693 if (last)
1694 str = NULL;
1695 else
1696 str = pos + 1;
1697 }
1698 }
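/*
 * Sketch of the scan above, assuming the pretty-printed trace_output holds
 * space-separated "name=value" tokens: for field "pid" and the (made-up)
 * output "comm=perf pid=1234 prio=120", the value "1234" is located and
 * dynamic_len is raised to at least 4.
 */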
1699
1700 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1701 struct perf_evsel *evsel __maybe_unused)
1702 {
1703 struct hpp_dynamic_entry *hde;
1704 size_t len = fmt->user_len;
1705
1706 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1707
1708 if (!len)
1709 len = hde_width(hde);
1710
1711 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1712 }
1713
1714 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1715 struct perf_hpp *hpp __maybe_unused,
1716 struct perf_evsel *evsel __maybe_unused)
1717 {
1718 struct hpp_dynamic_entry *hde;
1719 size_t len = fmt->user_len;
1720
1721 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1722
1723 if (!len)
1724 len = hde_width(hde);
1725
1726 return len;
1727 }
1728
1729 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1730 {
1731 struct hpp_dynamic_entry *hde;
1732
1733 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1734
1735 return hists_to_evsel(hists) == hde->evsel;
1736 }
1737
1738 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1739 struct hist_entry *he)
1740 {
1741 struct hpp_dynamic_entry *hde;
1742 size_t len = fmt->user_len;
1743 char *str, *pos;
1744 struct format_field *field;
1745 size_t namelen;
1746 bool last = false;
1747 int ret;
1748
1749 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1750
1751 if (!len)
1752 len = hde_width(hde);
1753
1754 if (hde->raw_trace)
1755 goto raw_field;
1756
1757 field = hde->field;
1758 namelen = strlen(field->name);
1759 str = he->trace_output;
1760
1761 while (str) {
1762 pos = strchr(str, ' ');
1763 if (pos == NULL) {
1764 last = true;
1765 pos = str + strlen(str);
1766 }
1767
1768 if (!strncmp(str, field->name, namelen)) {
1769 str += namelen + 1;
1770 str = strndup(str, pos - str);
1771
1772 if (str == NULL)
1773 return scnprintf(hpp->buf, hpp->size,
1774 "%*.*s", len, len, "ERROR");
1775 break;
1776 }
1777
1778 if (last)
1779 str = NULL;
1780 else
1781 str = pos + 1;
1782 }
1783
1784 if (str == NULL) {
1785 struct trace_seq seq;
1786 raw_field:
1787 trace_seq_init(&seq);
1788 pevent_print_field(&seq, he->raw_data, hde->field);
1789 str = seq.buffer;
1790 }
1791
1792 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1793 free(str);
1794 return ret;
1795 }
1796
1797 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1798 struct hist_entry *a, struct hist_entry *b)
1799 {
1800 struct hpp_dynamic_entry *hde;
1801 struct format_field *field;
1802 unsigned offset, size;
1803
1804 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1805
1806 field = hde->field;
1807 if (field->flags & FIELD_IS_DYNAMIC) {
1808 unsigned long long dyn;
1809
1810 pevent_read_number_field(field, a->raw_data, &dyn);
1811 offset = dyn & 0xffff;
1812 size = (dyn >> 16) & 0xffff;
1813
1814 /* record max width for output */
1815 if (size > hde->dynamic_len)
1816 hde->dynamic_len = size;
1817 } else {
1818 offset = field->offset;
1819 size = field->size;
1820
1821 update_dynamic_len(hde, a);
1822 update_dynamic_len(hde, b);
1823 }
1824
1825 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1826 }
1827
1828 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1829 {
1830 return fmt->cmp == __sort__hde_cmp;
1831 }
1832
1833 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1834 {
1835 struct hpp_dynamic_entry *hde_a;
1836 struct hpp_dynamic_entry *hde_b;
1837
1838 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1839 return false;
1840
1841 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1842 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1843
1844 return hde_a->field == hde_b->field;
1845 }
1846
1847 static void hde_free(struct perf_hpp_fmt *fmt)
1848 {
1849 struct hpp_dynamic_entry *hde;
1850
1851 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1852 free(hde);
1853 }
1854
1855 static struct hpp_dynamic_entry *
1856 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1857 {
1858 struct hpp_dynamic_entry *hde;
1859
1860 hde = malloc(sizeof(*hde));
1861 if (hde == NULL) {
1862 pr_debug("Memory allocation failed\n");
1863 return NULL;
1864 }
1865
1866 hde->evsel = evsel;
1867 hde->field = field;
1868 hde->dynamic_len = 0;
1869
1870 hde->hpp.name = field->name;
1871 hde->hpp.header = __sort__hde_header;
1872 hde->hpp.width = __sort__hde_width;
1873 hde->hpp.entry = __sort__hde_entry;
1874 hde->hpp.color = NULL;
1875
1876 hde->hpp.cmp = __sort__hde_cmp;
1877 hde->hpp.collapse = __sort__hde_cmp;
1878 hde->hpp.sort = __sort__hde_cmp;
1879 hde->hpp.equal = __sort__hde_equal;
1880 hde->hpp.free = hde_free;
1881
1882 INIT_LIST_HEAD(&hde->hpp.list);
1883 INIT_LIST_HEAD(&hde->hpp.sort_list);
1884 hde->hpp.elide = false;
1885 hde->hpp.len = 0;
1886 hde->hpp.user_len = 0;
1887
1888 return hde;
1889 }
1890
1891 static int parse_field_name(char *str, char **event, char **field, char **opt)
1892 {
1893 char *event_name, *field_name, *opt_name;
1894
1895 event_name = str;
1896 field_name = strchr(str, '.');
1897
1898 if (field_name) {
1899 *field_name++ = '\0';
1900 } else {
1901 event_name = NULL;
1902 field_name = str;
1903 }
1904
1905 opt_name = strchr(field_name, '/');
1906 if (opt_name)
1907 *opt_name++ = '\0';
1908
1909 *event = event_name;
1910 *field = field_name;
1911 *opt = opt_name;
1912
1913 return 0;
1914 }
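/*
 * Examples (field names are hypothetical, for illustration only):
 *   "sched:sched_switch.next_pid/raw" -> event "sched:sched_switch",
 *                                        field "next_pid", opt "raw"
 *   "next_pid"                        -> event NULL, field "next_pid", no opt
 */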
1915
1916 /* find the matching evsel using a given event name. The event name can be:
1917 * 1. '%' + event index (e.g. '%1' for first event)
1918 * 2. full event name (e.g. sched:sched_switch)
1919 * 3. partial event name (should not contain ':')
1920 */
1921 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1922 {
1923 struct perf_evsel *evsel = NULL;
1924 struct perf_evsel *pos;
1925 bool full_name;
1926
1927 /* case 1 */
1928 if (event_name[0] == '%') {
1929 int nr = strtol(event_name+1, NULL, 0);
1930
1931 if (nr > evlist->nr_entries)
1932 return NULL;
1933
1934 evsel = perf_evlist__first(evlist);
1935 while (--nr > 0)
1936 evsel = perf_evsel__next(evsel);
1937
1938 return evsel;
1939 }
1940
1941 full_name = !!strchr(event_name, ':');
1942 evlist__for_each(evlist, pos) {
1943 /* case 2 */
1944 if (full_name && !strcmp(pos->name, event_name))
1945 return pos;
1946 /* case 3 */
1947 if (!full_name && strstr(pos->name, event_name)) {
1948 if (evsel) {
1949 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1950 event_name, evsel->name, pos->name);
1951 return NULL;
1952 }
1953 evsel = pos;
1954 }
1955 }
1956
1957 return evsel;
1958 }
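/* Example: an event name of "%2" resolves to the second event in the evlist. */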
1959
1960 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1961 struct format_field *field,
1962 bool raw_trace)
1963 {
1964 struct hpp_dynamic_entry *hde;
1965
1966 hde = __alloc_dynamic_entry(evsel, field);
1967 if (hde == NULL)
1968 return -ENOMEM;
1969
1970 hde->raw_trace = raw_trace;
1971
1972 perf_hpp__register_sort_field(&hde->hpp);
1973 return 0;
1974 }
1975
1976 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1977 {
1978 int ret;
1979 struct format_field *field;
1980
1981 field = evsel->tp_format->format.fields;
1982 while (field) {
1983 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1984 if (ret < 0)
1985 return ret;
1986
1987 field = field->next;
1988 }
1989 return 0;
1990 }
1991
1992 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1993 {
1994 int ret;
1995 struct perf_evsel *evsel;
1996
1997 evlist__for_each(evlist, evsel) {
1998 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1999 continue;
2000
2001 ret = add_evsel_fields(evsel, raw_trace);
2002 if (ret < 0)
2003 return ret;
2004 }
2005 return 0;
2006 }
2007
2008 static int add_all_matching_fields(struct perf_evlist *evlist,
2009 char *field_name, bool raw_trace)
2010 {
2011 int ret = -ESRCH;
2012 struct perf_evsel *evsel;
2013 struct format_field *field;
2014
2015 evlist__for_each(evlist, evsel) {
2016 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2017 continue;
2018
2019 field = pevent_find_any_field(evsel->tp_format, field_name);
2020 if (field == NULL)
2021 continue;
2022
2023 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2024 if (ret < 0)
2025 break;
2026 }
2027 return ret;
2028 }
2029
2030 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
2031 {
2032 char *str, *event_name, *field_name, *opt_name;
2033 struct perf_evsel *evsel;
2034 struct format_field *field;
2035 bool raw_trace = symbol_conf.raw_trace;
2036 int ret = 0;
2037
2038 if (evlist == NULL)
2039 return -ENOENT;
2040
2041 str = strdup(tok);
2042 if (str == NULL)
2043 return -ENOMEM;
2044
2045 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2046 ret = -EINVAL;
2047 goto out;
2048 }
2049
2050 if (opt_name) {
2051 if (strcmp(opt_name, "raw")) {
2052 pr_debug("unsupported field option %s\n", opt_name);
2053 ret = -EINVAL;
2054 goto out;
2055 }
2056 raw_trace = true;
2057 }
2058
2059 if (!strcmp(field_name, "trace_fields")) {
2060 ret = add_all_dynamic_fields(evlist, raw_trace);
2061 goto out;
2062 }
2063
2064 if (event_name == NULL) {
2065 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2066 goto out;
2067 }
2068
2069 evsel = find_evsel(evlist, event_name);
2070 if (evsel == NULL) {
2071 pr_debug("Cannot find event: %s\n", event_name);
2072 ret = -ENOENT;
2073 goto out;
2074 }
2075
2076 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2077 pr_debug("%s is not a tracepoint event\n", event_name);
2078 ret = -EINVAL;
2079 goto out;
2080 }
2081
2082 if (!strcmp(field_name, "*")) {
2083 ret = add_evsel_fields(evsel, raw_trace);
2084 } else {
2085 field = pevent_find_any_field(evsel->tp_format, field_name);
2086 if (field == NULL) {
2087 pr_debug("Cannot find event field for %s.%s\n", event_name, field_name);
2088 ret = -ENOENT;
2089 goto out;
2090 }
2091
2092 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2093 }
2094
2095 out:
2096 free(str);
2097 return ret;
2098 }
2099
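/*
 * Register a sort dimension as a sort key, at most once, and note whether
 * collapsing will be needed.  hpp dimensions go through __hpp_dimension__add().
 */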
2100 static int __sort_dimension__add(struct sort_dimension *sd)
2101 {
2102 if (sd->taken)
2103 return 0;
2104
2105 if (__sort_dimension__add_hpp_sort(sd) < 0)
2106 return -1;
2107
2108 if (sd->entry->se_collapse)
2109 sort__need_collapse = 1;
2110
2111 sd->taken = 1;
2112
2113 return 0;
2114 }
2115
2116 static int __hpp_dimension__add(struct hpp_dimension *hd)
2117 {
2118 struct perf_hpp_fmt *fmt;
2119
2120 if (hd->taken)
2121 return 0;
2122
2123 fmt = __hpp_dimension__alloc_hpp(hd);
2124 if (!fmt)
2125 return -1;
2126
2127 hd->taken = 1;
2128 perf_hpp__register_sort_field(fmt);
2129 return 0;
2130 }
2131
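/*
 * Output-field counterparts of the above: register the dimension as an
 * output column on the given list instead of as a sort key.
 */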
2132 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2133 struct sort_dimension *sd)
2134 {
2135 if (sd->taken)
2136 return 0;
2137
2138 if (__sort_dimension__add_hpp_output(list, sd) < 0)
2139 return -1;
2140
2141 sd->taken = 1;
2142 return 0;
2143 }
2144
2145 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2146 struct hpp_dimension *hd)
2147 {
2148 struct perf_hpp_fmt *fmt;
2149
2150 if (hd->taken)
2151 return 0;
2152
2153 fmt = __hpp_dimension__alloc_hpp(hd);
2154 if (!fmt)
2155 return -1;
2156
2157 hd->taken = 1;
2158 perf_hpp_list__column_register(list, fmt);
2159 return 0;
2160 }
2161
2162 int hpp_dimension__add_output(unsigned col)
2163 {
2164 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2165 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2166 }
2167
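/*
 * Resolve a single --sort token: try the common, hpp, branch stack and
 * memory dimension tables in turn, then fall back to dynamic tracepoint
 * fields.  Returns -EINVAL for a key that is not valid in the current sort
 * mode and -ESRCH for a key that cannot be resolved at all.
 */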
2168 static int sort_dimension__add(const char *tok,
2169 			       struct perf_evlist *evlist)
2170 {
2171 unsigned int i;
2172
2173 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2174 struct sort_dimension *sd = &common_sort_dimensions[i];
2175
2176 if (strncasecmp(tok, sd->name, strlen(tok)))
2177 continue;
2178
2179 if (sd->entry == &sort_parent) {
2180 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2181 if (ret) {
2182 char err[BUFSIZ];
2183
2184 regerror(ret, &parent_regex, err, sizeof(err));
2185 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2186 return -EINVAL;
2187 }
2188 sort__has_parent = 1;
2189 } else if (sd->entry == &sort_sym) {
2190 sort__has_sym = 1;
2191 /*
2192 			 * perf diff displays the performance difference between
2193 			 * two or more perf.data files.  Those files could come
2194 			 * from different binaries, so we should not compare
2195 			 * their instruction pointers but the symbol names.
2196 */
2197 if (sort__mode == SORT_MODE__DIFF)
2198 sd->entry->se_collapse = sort__sym_sort;
2199
2200 } else if (sd->entry == &sort_dso) {
2201 sort__has_dso = 1;
2202 } else if (sd->entry == &sort_socket) {
2203 sort__has_socket = 1;
2204 } else if (sd->entry == &sort_thread) {
2205 sort__has_thread = 1;
2206 }
2207
2208 return __sort_dimension__add(sd);
2209 }
2210
2211 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2212 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2213
2214 if (strncasecmp(tok, hd->name, strlen(tok)))
2215 continue;
2216
2217 return __hpp_dimension__add(hd);
2218 }
2219
2220 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2221 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2222
2223 if (strncasecmp(tok, sd->name, strlen(tok)))
2224 continue;
2225
2226 if (sort__mode != SORT_MODE__BRANCH)
2227 return -EINVAL;
2228
2229 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2230 sort__has_sym = 1;
2231
2232 __sort_dimension__add(sd);
2233 return 0;
2234 }
2235
2236 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2237 struct sort_dimension *sd = &memory_sort_dimensions[i];
2238
2239 if (strncasecmp(tok, sd->name, strlen(tok)))
2240 continue;
2241
2242 if (sort__mode != SORT_MODE__MEMORY)
2243 return -EINVAL;
2244
2245 if (sd->entry == &sort_mem_daddr_sym)
2246 sort__has_sym = 1;
2247
2248 __sort_dimension__add(sd);
2249 return 0;
2250 }
2251
2252 if (!add_dynamic_entry(evlist, tok))
2253 return 0;
2254
2255 return -ESRCH;
2256 }
2257
2258 static int setup_sort_list(char *str, struct perf_evlist *evlist)
2259 {
2260 char *tmp, *tok;
2261 int ret = 0;
2262
2263 for (tok = strtok_r(str, ", ", &tmp);
2264 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2265 ret = sort_dimension__add(tok, evlist);
2266 if (ret == -EINVAL) {
2267 error("Invalid --sort key: `%s'", tok);
2268 break;
2269 } else if (ret == -ESRCH) {
2270 error("Unknown --sort key: `%s'", tok);
2271 break;
2272 }
2273 }
2274
2275 return ret;
2276 }
2277
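/*
 * Pick the default sort order for the current sort mode.  If every event in
 * the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT so that the
 * 'trace' key (or 'trace_fields' when raw trace output is requested) is used.
 */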
2278 static const char *get_default_sort_order(struct perf_evlist *evlist)
2279 {
2280 const char *default_sort_orders[] = {
2281 default_sort_order,
2282 default_branch_sort_order,
2283 default_mem_sort_order,
2284 default_top_sort_order,
2285 default_diff_sort_order,
2286 default_tracepoint_sort_order,
2287 };
2288 bool use_trace = true;
2289 struct perf_evsel *evsel;
2290
2291 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2292
2293 if (evlist == NULL)
2294 goto out_no_evlist;
2295
2296 evlist__for_each(evlist, evsel) {
2297 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2298 use_trace = false;
2299 break;
2300 }
2301 }
2302
2303 if (use_trace) {
2304 sort__mode = SORT_MODE__TRACEPOINT;
2305 if (symbol_conf.raw_trace)
2306 return "trace_fields";
2307 }
2308 out_no_evlist:
2309 return default_sort_orders[sort__mode];
2310 }
2311
2312 static int setup_sort_order(struct perf_evlist *evlist)
2313 {
2314 char *new_sort_order;
2315
2316 /*
2317 * Append '+'-prefixed sort order to the default sort
2318 * order string.
2319 */
2320 if (!sort_order || is_strict_order(sort_order))
2321 return 0;
2322
2323 if (sort_order[1] == '\0') {
2324 error("Invalid --sort key: `+'");
2325 return -EINVAL;
2326 }
2327
2328 /*
2329 * We allocate new sort_order string, but we never free it,
2330 * because it's checked over the rest of the code.
2331 */
2332 if (asprintf(&new_sort_order, "%s,%s",
2333 get_default_sort_order(evlist), sort_order + 1) < 0) {
2334 error("Not enough memory to set up --sort");
2335 return -ENOMEM;
2336 }
2337
2338 sort_order = new_sort_order;
2339 return 0;
2340 }
2341
2342 /*
2343  * Adds 'pre,' prefix into 'str' if 'pre' is
2344 * not already part of 'str'.
2345 */
2346 static char *prefix_if_not_in(const char *pre, char *str)
2347 {
2348 char *n;
2349
2350 if (!str || strstr(str, pre))
2351 return str;
2352
2353 if (asprintf(&n, "%s,%s", pre, str) < 0)
2354 return NULL;
2355
2356 free(str);
2357 return n;
2358 }
2359
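/*
 * Make sure the sort key list starts with 'overhead', and with
 * 'overhead_children' as well when callchain accumulation is enabled.
 */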
2360 static char *setup_overhead(char *keys)
2361 {
2362 keys = prefix_if_not_in("overhead", keys);
2363
2364 if (symbol_conf.cumulate_callchain)
2365 keys = prefix_if_not_in("overhead_children", keys);
2366
2367 return keys;
2368 }
2369
2370 static int __setup_sorting(struct perf_evlist *evlist)
2371 {
2372 char *str;
2373 const char *sort_keys;
2374 int ret = 0;
2375
2376 ret = setup_sort_order(evlist);
2377 if (ret)
2378 return ret;
2379
2380 sort_keys = sort_order;
2381 if (sort_keys == NULL) {
2382 if (is_strict_order(field_order)) {
2383 /*
2384 * If user specified field order but no sort order,
2385 * we'll honor it and not add default sort orders.
2386 */
2387 return 0;
2388 }
2389
2390 sort_keys = get_default_sort_order(evlist);
2391 }
2392
2393 str = strdup(sort_keys);
2394 if (str == NULL) {
2395 error("Not enough memory to setup sort keys");
2396 return -ENOMEM;
2397 }
2398
2399 /*
2400 * Prepend overhead fields for backward compatibility.
2401 */
2402 if (!is_strict_order(field_order)) {
2403 str = setup_overhead(str);
2404 if (str == NULL) {
2405 error("Not enough memory to setup overhead keys");
2406 return -ENOMEM;
2407 }
2408 }
2409
2410 ret = setup_sort_list(str, evlist);
2411
2412 free(str);
2413 return ret;
2414 }
2415
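/* Force the elide state of the sort column identified by its width index. */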
2416 void perf_hpp__set_elide(int idx, bool elide)
2417 {
2418 struct perf_hpp_fmt *fmt;
2419 struct hpp_sort_entry *hse;
2420
2421 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2422 if (!perf_hpp__is_sort_entry(fmt))
2423 continue;
2424
2425 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2426 if (hse->se->se_width_idx == idx) {
2427 fmt->elide = elide;
2428 break;
2429 }
2430 }
2431 }
2432
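/*
 * A column may be elided when a filter list pins it to a single value; the
 * value is then printed once as a comment header instead of being repeated
 * on every line.
 */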
2433 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2434 {
2435 if (list && strlist__nr_entries(list) == 1) {
2436 if (fp != NULL)
2437 fprintf(fp, "# %s: %s\n", list_name,
2438 strlist__entry(list, 0)->s);
2439 return true;
2440 }
2441 return false;
2442 }
2443
2444 static bool get_elide(int idx, FILE *output)
2445 {
2446 switch (idx) {
2447 case HISTC_SYMBOL:
2448 return __get_elide(symbol_conf.sym_list, "symbol", output);
2449 case HISTC_DSO:
2450 return __get_elide(symbol_conf.dso_list, "dso", output);
2451 case HISTC_COMM:
2452 return __get_elide(symbol_conf.comm_list, "comm", output);
2453 default:
2454 break;
2455 }
2456
2457 if (sort__mode != SORT_MODE__BRANCH)
2458 return false;
2459
2460 switch (idx) {
2461 case HISTC_SYMBOL_FROM:
2462 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2463 case HISTC_SYMBOL_TO:
2464 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2465 case HISTC_DSO_FROM:
2466 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2467 case HISTC_DSO_TO:
2468 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2469 default:
2470 break;
2471 }
2472
2473 return false;
2474 }
2475
2476 void sort__setup_elide(FILE *output)
2477 {
2478 struct perf_hpp_fmt *fmt;
2479 struct hpp_sort_entry *hse;
2480
2481 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2482 if (!perf_hpp__is_sort_entry(fmt))
2483 continue;
2484
2485 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2486 fmt->elide = get_elide(hse->se->se_width_idx, output);
2487 }
2488
2489 /*
2490 	 * It makes no sense to elide all of the sort entries.
2491 	 * Just revert them so they show up again.
2492 */
2493 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2494 if (!perf_hpp__is_sort_entry(fmt))
2495 continue;
2496
2497 if (!fmt->elide)
2498 return;
2499 }
2500
2501 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2502 if (!perf_hpp__is_sort_entry(fmt))
2503 continue;
2504
2505 fmt->elide = false;
2506 }
2507 }
2508
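/* Resolve a single --fields token against the known dimension tables. */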
2509 static int output_field_add(struct perf_hpp_list *list, char *tok)
2510 {
2511 unsigned int i;
2512
2513 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2514 struct sort_dimension *sd = &common_sort_dimensions[i];
2515
2516 if (strncasecmp(tok, sd->name, strlen(tok)))
2517 continue;
2518
2519 return __sort_dimension__add_output(list, sd);
2520 }
2521
2522 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2523 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2524
2525 if (strncasecmp(tok, hd->name, strlen(tok)))
2526 continue;
2527
2528 return __hpp_dimension__add_output(list, hd);
2529 }
2530
2531 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2532 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2533
2534 if (strncasecmp(tok, sd->name, strlen(tok)))
2535 continue;
2536
2537 return __sort_dimension__add_output(list, sd);
2538 }
2539
2540 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2541 struct sort_dimension *sd = &memory_sort_dimensions[i];
2542
2543 if (strncasecmp(tok, sd->name, strlen(tok)))
2544 continue;
2545
2546 return __sort_dimension__add_output(list, sd);
2547 }
2548
2549 return -ESRCH;
2550 }
2551
2552 static int setup_output_list(struct perf_hpp_list *list, char *str)
2553 {
2554 char *tmp, *tok;
2555 int ret = 0;
2556
2557 for (tok = strtok_r(str, ", ", &tmp);
2558 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2559 ret = output_field_add(list, tok);
2560 if (ret == -EINVAL) {
2561 error("Invalid --fields key: `%s'", tok);
2562 break;
2563 } else if (ret == -ESRCH) {
2564 error("Unknown --fields key: `%s'", tok);
2565 break;
2566 }
2567 }
2568
2569 return ret;
2570 }
2571
2572 static void reset_dimensions(void)
2573 {
2574 unsigned int i;
2575
2576 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2577 common_sort_dimensions[i].taken = 0;
2578
2579 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2580 hpp_sort_dimensions[i].taken = 0;
2581
2582 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2583 bstack_sort_dimensions[i].taken = 0;
2584
2585 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2586 memory_sort_dimensions[i].taken = 0;
2587 }
2588
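/*
 * A "strict" order string replaces the defaults; a string starting with '+'
 * appends to them instead.
 */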
2589 bool is_strict_order(const char *order)
2590 {
2591 return order && (*order != '+');
2592 }
2593
2594 static int __setup_output_field(void)
2595 {
2596 char *str, *strp;
2597 int ret = -EINVAL;
2598
2599 if (field_order == NULL)
2600 return 0;
2601
2602 strp = str = strdup(field_order);
2603 if (str == NULL) {
2604 error("Not enough memory to setup output fields");
2605 return -ENOMEM;
2606 }
2607
2608 if (!is_strict_order(field_order))
2609 strp++;
2610
2611 if (!strlen(strp)) {
2612 error("Invalid --fields key: `+'");
2613 goto out;
2614 }
2615
2616 ret = setup_output_list(&perf_hpp_list, strp);
2617
2618 out:
2619 free(str);
2620 return ret;
2621 }
2622
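/*
 * Tool entry point: set up the sort keys and output fields, adding the
 * implicit 'parent' key when a non-default parent pattern was given.
 */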
2623 int setup_sorting(struct perf_evlist *evlist)
2624 {
2625 int err;
2626
2627 err = __setup_sorting(evlist);
2628 if (err < 0)
2629 return err;
2630
2631 if (parent_pattern != default_parent_pattern) {
2632 err = sort_dimension__add("parent", evlist);
2633 if (err < 0)
2634 return err;
2635 }
2636
2637 reset_dimensions();
2638
2639 /*
2640 * perf diff doesn't use default hpp output fields.
2641 */
2642 if (sort__mode != SORT_MODE__DIFF)
2643 perf_hpp__init();
2644
2645 err = __setup_output_field();
2646 if (err < 0)
2647 return err;
2648
2649 /* copy sort keys to output fields */
2650 perf_hpp__setup_output_field(&perf_hpp_list);
2651 /* and then copy output fields to sort keys */
2652 perf_hpp__append_sort_keys(&perf_hpp_list);
2653
2654 return 0;
2655 }
2656
2657 void reset_output_field(void)
2658 {
2659 sort__need_collapse = 0;
2660 sort__has_parent = 0;
2661 sort__has_sym = 0;
2662 sort__has_dso = 0;
2663
2664 field_order = NULL;
2665 sort_order = NULL;
2666
2667 reset_dimensions();
2668 perf_hpp__reset_output_field(&perf_hpp_list);
2669 }