3e755f2bfe8f19ad0e45c9bb6c080b573eb00a38
[deliverable/linux.git] / tools / perf / util / header.c
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26
27 static bool no_buildid_cache = false;
28
29 static u32 header_argc;
30 static const char **header_argv;
31
32 /*
33 * magic2 = "PERFILE2"
34 * must be a numerical value to let the endianness
35 * determine the memory layout. That way we are able
36 * to detect endianness when reading the perf.data file
37 * back.
38 *
39 * we check for legacy (PERFFILE) format.
40 */
41 static const char *__perf_magic1 = "PERFFILE";
42 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
43 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
44
45 #define PERF_MAGIC __perf_magic2
46
47 struct perf_file_attr {
48 struct perf_event_attr attr;
49 struct perf_file_section ids;
50 };
51
/* Mark feature 'feat' as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
56
/* Mark feature 'feat' as absent in the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
61
/* True if feature 'feat' is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
66
/*
 * Write 'size' bytes from 'buf' to 'fd', retrying on short writes.
 * Returns 0 on success or -errno from the failing write(2).
 */
static int do_write(int fd, const void *buf, size_t size)
{
	/* use a char pointer: arithmetic on void * is a GCC extension */
	const char *p = buf;

	while (size) {
		/* ssize_t, not int: write() may return more than INT_MAX */
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
81
#define NAME_ALIGN 64

/*
 * Write 'count' bytes of 'bf' followed by zeroes up to 'count_aligned'
 * total bytes.  Callers align with PERF_ALIGN(..., NAME_ALIGN), so the
 * padding never exceeds the NAME_ALIGN-byte zero buffer.
 */
static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}
95
96 static int do_write_string(int fd, const char *str)
97 {
98 u32 len, olen;
99 int ret;
100
101 olen = strlen(str) + 1;
102 len = PERF_ALIGN(olen, NAME_ALIGN);
103
104 /* write len, incl. \0 */
105 ret = do_write(fd, &len, sizeof(len));
106 if (ret < 0)
107 return ret;
108
109 return write_padded(fd, str, olen, len);
110 }
111
/*
 * Read back a string written by do_write_string(): a u32 length prefix
 * followed by that many zero-padded bytes.  Returns a malloc'ed buffer
 * the caller must free, or NULL on read/alloc failure.
 */
static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = readn(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	/* the prefix was written in the producer's byte order */
	if (ph->needs_swap)
		len = bswap_32(len);

	/* NOTE(review): len comes from the file unvalidated; a corrupt
	 * header could request a huge allocation or yield a buffer with
	 * no NUL — confirm callers tolerate that. */
	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = readn(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
142
143 int
144 perf_header__set_cmdline(int argc, const char **argv)
145 {
146 int i;
147
148 /*
149 * If header_argv has already been set, do not override it.
150 * This allows a command to set the cmdline, parse args and
151 * then call another builtin function that implements a
152 * command -- e.g, cmd_kvm calling cmd_record.
153 */
154 if (header_argv)
155 return 0;
156
157 header_argc = (u32)argc;
158
159 /* do not include NULL termination */
160 header_argv = calloc(argc, sizeof(char *));
161 if (!header_argv)
162 return -ENOMEM;
163
164 /*
165 * must copy argv contents because it gets moved
166 * around during option parsing
167 */
168 for (i = 0; i < argc ; i++)
169 header_argv[i] = argv[i];
170
171 return 0;
172 }
173
/*
 * Iterate 'pos' over the dsos on 'head' that carry a build id, skipping
 * the rest.  The trailing 'else' keeps the construct usable as a single
 * statement in front of a brace-less loop body.
 */
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)	\
		if (!pos->has_build_id)		\
			continue;		\
		else
179
/*
 * Emit one build_id_event record: fixed-size header + build id, then the
 * dso name zero-padded up to the NAME_ALIGN boundary.
 */
static int write_buildid(char *name, size_t name_len, u8 *build_id,
			 pid_t pid, u16 misc, int fd)
{
	int err;
	struct build_id_event b;
	size_t len;

	len = name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);

	memset(&b, 0, sizeof(b));
	memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
	b.pid = pid;
	b.header.misc = misc;
	/* total record size includes the padded name */
	b.header.size = sizeof(b) + len;

	err = do_write(fd, &b, sizeof(b));
	if (err < 0)
		return err;

	/* NOTE(review): callers pass name_len = strlen + 1, so this reads
	 * name_len + 1 bytes — one byte beyond the NUL.  Looks like an
	 * off-by-one; confirm against the reader side. */
	return write_padded(fd, name, name_len + 1, len);
}
202
/*
 * Write a build id record for every dso on 'head' that both has a
 * build id and was actually hit during the session.
 */
static int __dsos__write_buildid_table(struct list_head *head,
				       struct machine *machine,
				       pid_t pid, u16 misc, int fd)
{
	char nm[PATH_MAX];
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		char *name;
		size_t name_len;

		if (!pos->hit)
			continue;

		if (is_vdso_map(pos->short_name)) {
			/* record the canonical vdso name, not the map name.
			 * NOTE(review): sizeof() already counts the NUL, so
			 * the +1 here over-counts by one compared to the
			 * strlen-based branches — confirm intended. */
			name = (char *) VDSO__MAP_NAME;
			name_len = sizeof(VDSO__MAP_NAME) + 1;
		} else if (dso__is_kcore(pos)) {
			/* kcore stands in for the kernel: use the mmap name */
			machine__mmap_name(machine, nm, sizeof(nm));
			name = nm;
			name_len = strlen(nm) + 1;
		} else {
			name = pos->long_name;
			name_len = pos->long_name_len + 1;
		}

		err = write_buildid(name, name_len, pos->build_id,
				    pid, misc, fd);
		if (err)
			return err;
	}

	return 0;
}
238
/*
 * Write build id records for one machine's kernel and user dsos,
 * tagging guest machines with the GUEST misc flavors.
 */
static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
					  machine->pid, kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos, machine,
						  machine->pid, umisc, fd);
	return err;
}
257
/* Write build id tables for the host machine and then every guest,
 * stopping at the first failure. */
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->machines.host, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
276
/*
 * Add 'name' to the build-id cache under 'debugdir':
 *   <debugdir>/<path>/<sbuild_id>           (the cached content)
 *   <debugdir>/.build-id/xx/rest-of-id      (relative symlink back)
 * Returns 0 on success (or when caching is deliberately skipped),
 * -1 on failure.
 */
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms, bool is_vdso)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = zalloc(size),
	     *linkname = zalloc(size), *targetname;
	int len, err = -1;
	/* kallsyms/vdso names have no leading '/' of their own */
	bool slash = is_kallsyms || is_vdso;

	if (is_kallsyms) {
		if (symbol_conf.kptr_restrict) {
			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
			err = 0;
			goto out_free;
		}
		realname = (char *) name;
	} else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = scnprintf(filename, size, "%s%s%s",
			debugdir, slash ? "/" : "",
			is_vdso ? VDSO__MAP_NAME : realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	/* only populate the cache entry if it does not exist yet */
	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			/* hard link may fail (e.g. cross-device); the
			 * copyfile fallback also failed here */
			goto out_free;
	}

	len = scnprintf(linkname, size, "%s/.build-id/%.2s",
			debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	/* make the symlink target relative: overwrite the tail of
	 * <debugdir> in filename with "../.." */
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}
334
335 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
336 const char *name, const char *debugdir,
337 bool is_kallsyms, bool is_vdso)
338 {
339 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
340
341 build_id__sprintf(build_id, build_id_size, sbuild_id);
342
343 return build_id_cache__add_s(sbuild_id, debugdir, name,
344 is_kallsyms, is_vdso);
345 }
346
/*
 * Remove a cached build id: unlink the .build-id symlink and then the
 * cached file it pointed to.  Returns 0 on success, -1 on failure.
 */
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/* filename is zero-filled by zalloc, so the readlink result
	 * (which is not NUL-terminated by the kernel) ends up a
	 * proper string */
	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
384
/*
 * Cache one dso's build id.  kcore is treated like kallsyms since its
 * name in the cache comes from the machine's mmap name.
 */
static int dso__cache_build_id(struct dso *dso, struct machine *machine,
			       const char *debugdir)
{
	/* kernel dsos without an on-disk path came from kallsyms */
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
	bool is_vdso = is_vdso_map(dso->short_name);
	char *name = dso->long_name;
	char nm[PATH_MAX];

	if (dso__is_kcore(dso)) {
		is_kallsyms = true;
		machine__mmap_name(machine, nm, sizeof(nm));
		name = nm;
	}
	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
				     debugdir, is_kallsyms, is_vdso);
}
401
/* Cache build ids for every dso on 'head' that has one; keeps going on
 * failure and returns -1 if any dso failed. */
static int __dsos__cache_build_ids(struct list_head *head,
				   struct machine *machine, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, machine, debugdir))
			err = -1;

	return err;
}
414
415 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
416 {
417 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
418 debugdir);
419 ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
420 return ret;
421 }
422
/*
 * Populate the on-disk build-id cache (buildid_dir) for the host and
 * all guest machines of this session.  Returns 0 on success, -1 if
 * any machine failed.
 */
static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	/* a pre-existing cache directory is fine */
	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->machines.host, debugdir);

	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}
442
443 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
444 {
445 bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
446 ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
447 return ret;
448 }
449
/* True if any dso in the session (host or guests) has a build id,
 * optionally restricted to dsos that were hit. */
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->machines.host, with_hits);

	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}
462
/* HEADER_TRACING_DATA: delegate to the tracepoint data dumper for all
 * evsels in the list. */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
468
469
/*
 * HEADER_BUILD_ID: write the build id table for all dsos hit in the
 * session and, unless disabled, populate the build-id cache as a side
 * effect.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	/* nothing to write if no hit dso has a build id */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = dsos__write_buildid_table(h, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* caching failures are deliberately ignored: best effort only */
	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);

	return 0;
}
491
492 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
493 struct perf_evlist *evlist __maybe_unused)
494 {
495 struct utsname uts;
496 int ret;
497
498 ret = uname(&uts);
499 if (ret < 0)
500 return -1;
501
502 return do_write_string(fd, uts.nodename);
503 }
504
505 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
506 struct perf_evlist *evlist __maybe_unused)
507 {
508 struct utsname uts;
509 int ret;
510
511 ret = uname(&uts);
512 if (ret < 0)
513 return -1;
514
515 return do_write_string(fd, uts.release);
516 }
517
518 static int write_arch(int fd, struct perf_header *h __maybe_unused,
519 struct perf_evlist *evlist __maybe_unused)
520 {
521 struct utsname uts;
522 int ret;
523
524 ret = uname(&uts);
525 if (ret < 0)
526 return -1;
527
528 return do_write_string(fd, uts.machine);
529 }
530
/* HEADER_VERSION: the perf version string. */
static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}
536
537 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
538 struct perf_evlist *evlist __maybe_unused)
539 {
540 #ifndef CPUINFO_PROC
541 #define CPUINFO_PROC NULL
542 #endif
543 FILE *file;
544 char *buf = NULL;
545 char *s, *p;
546 const char *search = CPUINFO_PROC;
547 size_t len = 0;
548 int ret = -1;
549
550 if (!search)
551 return -1;
552
553 file = fopen("/proc/cpuinfo", "r");
554 if (!file)
555 return -1;
556
557 while (getline(&buf, &len, file) > 0) {
558 ret = strncmp(buf, search, strlen(search));
559 if (!ret)
560 break;
561 }
562
563 if (ret)
564 goto done;
565
566 s = buf;
567
568 p = strchr(buf, ':');
569 if (p && *(p+1) == ' ' && *(p+2))
570 s = p + 2;
571 p = strchr(s, '\n');
572 if (p)
573 *p = '\0';
574
575 /* squash extra space characters (branding string) */
576 p = s;
577 while (*p) {
578 if (isspace(*p)) {
579 char *r = p + 1;
580 char *q = r;
581 *p = ' ';
582 while (*q && isspace(*q))
583 q++;
584 if (q != (p+1))
585 while ((*r++ = *q++));
586 }
587 p++;
588 }
589 ret = do_write_string(fd, s);
590 done:
591 free(buf);
592 fclose(file);
593 return ret;
594 }
595
596 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
597 struct perf_evlist *evlist __maybe_unused)
598 {
599 long nr;
600 u32 nrc, nra;
601 int ret;
602
603 nr = sysconf(_SC_NPROCESSORS_CONF);
604 if (nr < 0)
605 return -1;
606
607 nrc = (u32)(nr & UINT_MAX);
608
609 nr = sysconf(_SC_NPROCESSORS_ONLN);
610 if (nr < 0)
611 return -1;
612
613 nra = (u32)(nr & UINT_MAX);
614
615 ret = do_write(fd, &nrc, sizeof(nrc));
616 if (ret < 0)
617 return ret;
618
619 return do_write(fd, &nra, sizeof(nra));
620 }
621
/*
 * HEADER_EVENT_DESC layout:
 *   u32 nr_events, u32 attr_size,
 *   then per event: the attr struct, u32 nr_ids, the event name
 *   string, and nr_ids u64 sample ids.
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	list_for_each_entry(evsel, &evlist->entries, node) {

		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
678
679 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
680 struct perf_evlist *evlist __maybe_unused)
681 {
682 char buf[MAXPATHLEN];
683 char proc[32];
684 u32 i, n;
685 int ret;
686
687 /*
688 * actual atual path to perf binary
689 */
690 sprintf(proc, "/proc/%d/exe", getpid());
691 ret = readlink(proc, buf, sizeof(buf));
692 if (ret <= 0)
693 return -1;
694
695 /* readlink() does not add null termination */
696 buf[ret] = '\0';
697
698 /* account for binary path */
699 n = header_argc + 1;
700
701 ret = do_write(fd, &n, sizeof(n));
702 if (ret < 0)
703 return ret;
704
705 ret = do_write_string(fd, buf);
706 if (ret < 0)
707 return ret;
708
709 for (i = 0 ; i < header_argc; i++) {
710 ret = do_write_string(fd, header_argv[i]);
711 if (ret < 0)
712 return ret;
713 }
714 return 0;
715 }
716
/* sysfs locations of the per-cpu sibling list files */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/*
 * Deduplicated sibling-list strings for HEADER_CPU_TOPOLOGY.  Both
 * pointer arrays live in the same single allocation as the struct
 * (see build_cpu_topology()); the strings come from getline().
 */
struct cpu_topo {
	u32 core_sib;		/* number of distinct core sibling lists */
	u32 thread_sib;		/* number of distinct thread sibling lists */
	char **core_siblings;
	char **thread_siblings;
};
728
/*
 * Read 'cpu's core and thread sibling lists from sysfs and record each
 * list string in 'tp' if an identical one is not already stored.
 * Returns 0 when at least one list was processed, -1 otherwise.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* only store a sibling list we have not seen yet */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;	/* ownership moved into tp */
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;	/* ownership moved into tp */
	}
	ret = 0;
done:
	/* fp was reassigned by the second fopen(), so this never
	 * double-closes the first stream */
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
794
795 static void free_cpu_topo(struct cpu_topo *tp)
796 {
797 u32 i;
798
799 if (!tp)
800 return;
801
802 for (i = 0 ; i < tp->core_sib; i++)
803 free(tp->core_siblings[i]);
804
805 for (i = 0 ; i < tp->thread_sib; i++)
806 free(tp->thread_siblings[i]);
807
808 free(tp);
809 }
810
/*
 * Allocate a cpu_topo — struct plus both sibling pointer arrays in one
 * calloc — and fill it from sysfs.  Returns NULL on failure.
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	/* single allocation: struct, then core array, then thread array */
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		return NULL;

	tp = addr;

	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
	/* NOTE(review): if nr == 0 the loop never runs and ret stays -1,
	 * so a zero-cpu result is treated as failure — confirm intended. */
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
850
/*
 * HEADER_CPU_TOPOLOGY: u32 count + strings for the distinct core
 * sibling lists, then u32 count + strings for the thread sibling lists.
 */
static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}
done:
	free_cpu_topo(tp);
	return ret;
}
884
885
886
887 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
888 struct perf_evlist *evlist __maybe_unused)
889 {
890 char *buf = NULL;
891 FILE *fp;
892 size_t len = 0;
893 int ret = -1, n;
894 uint64_t mem;
895
896 fp = fopen("/proc/meminfo", "r");
897 if (!fp)
898 return -1;
899
900 while (getline(&buf, &len, fp) > 0) {
901 ret = strncmp(buf, "MemTotal:", 9);
902 if (!ret)
903 break;
904 }
905 if (!ret) {
906 n = sscanf(buf, "%*s %"PRIu64, &mem);
907 if (n == 1)
908 ret = do_write(fd, &mem, sizeof(mem));
909 }
910 free(buf);
911 fclose(fp);
912 return ret;
913 }
914
915 static int write_topo_node(int fd, int node)
916 {
917 char str[MAXPATHLEN];
918 char field[32];
919 char *buf = NULL, *p;
920 size_t len = 0;
921 FILE *fp;
922 u64 mem_total, mem_free, mem;
923 int ret = -1;
924
925 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
926 fp = fopen(str, "r");
927 if (!fp)
928 return -1;
929
930 while (getline(&buf, &len, fp) > 0) {
931 /* skip over invalid lines */
932 if (!strchr(buf, ':'))
933 continue;
934 if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
935 goto done;
936 if (!strcmp(field, "MemTotal:"))
937 mem_total = mem;
938 if (!strcmp(field, "MemFree:"))
939 mem_free = mem;
940 }
941
942 fclose(fp);
943 fp = NULL;
944
945 ret = do_write(fd, &mem_total, sizeof(u64));
946 if (ret)
947 goto done;
948
949 ret = do_write(fd, &mem_free, sizeof(u64));
950 if (ret)
951 goto done;
952
953 ret = -1;
954 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
955
956 fp = fopen(str, "r");
957 if (!fp)
958 goto done;
959
960 if (getline(&buf, &len, fp) <= 0)
961 goto done;
962
963 p = strchr(buf, '\n');
964 if (p)
965 *p = '\0';
966
967 ret = do_write_string(fd, buf);
968 done:
969 free(buf);
970 if (fp)
971 fclose(fp);
972 return ret;
973 }
974
/*
 * HEADER_NUMA_TOPOLOGY: u32 nr_nodes, then for each online node its
 * number, meminfo (via write_topo_node) and cpu list.
 */
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	/* parse the online-node range string, e.g. "0-3" */
	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		/* NOTE(review): passes the index i, not the node number j;
		 * correct only if online nodes are contiguous from 0 —
		 * confirm on sparse-node systems. */
		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	/* NOTE(review): plain free() of a map from cpu_map__new();
	 * confirm cpu_map__delete() is not required here. */
	free(node_map);
	return ret;
}
1023
1024 /*
1025 * File format:
1026 *
1027 * struct pmu_mappings {
1028 * u32 pmu_num;
1029 * struct pmu_map {
1030 * u32 type;
1031 * char name[];
1032 * }[pmu_num];
1033 * };
1034 */
1035
/*
 * HEADER_PMU_MAPPINGS: u32 pmu_num, then (u32 type, name string) per
 * named pmu.  pmu_num is back-patched via pwrite() once known.
 */
static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		/* anonymous pmus are not recorded */
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}
1071
1072 /*
1073 * File format:
1074 *
1075 * struct group_descs {
1076 * u32 nr_groups;
1077 * struct group_desc {
1078 * char name[];
1079 * u32 leader_idx;
1080 * u32 nr_members;
1081 * }[nr_groups];
1082 * };
1083 */
/* HEADER_GROUP_DESC: see the struct group_descs layout comment above. */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	list_for_each_entry(evsel, &evlist->entries, node) {
		/* only real groups: a leader with more than one member */
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
1117
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
				     size_t sz __maybe_unused)
{
	/* weak fallback: report "no cpuid available" */
	return -1;
}
1127
1128 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
1129 struct perf_evlist *evlist __maybe_unused)
1130 {
1131 char buffer[64];
1132 int ret;
1133
1134 ret = get_cpuid(buffer, sizeof(buffer));
1135 if (!ret)
1136 goto write_it;
1137
1138 return -1;
1139 write_it:
1140 return do_write_string(fd, buffer);
1141 }
1142
/* HEADER_BRANCH_STACK: presence-only feature, no payload to write. */
static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1149
/* perf report --header: print the recorded hostname. */
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}
1155
/* perf report --header: print the recorded OS release. */
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}
1161
/* perf report --header: print the recorded architecture. */
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}
1166
/* perf report --header: print the recorded CPU description string. */
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}
1172
/* perf report --header: print online and available CPU counts. */
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}
1179
/* perf report --header: print the perf version that recorded the data. */
static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}
1185
/* perf report --header: print the recorded cmdline.  The argument
 * strings are packed back to back, NUL-separated. */
static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	int nr, i;
	char *str;

	nr = ph->env.nr_cmdline;
	str = ph->env.cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		fprintf(fp, "%s ", str);
		str += strlen(str) + 1;	/* advance past the NUL */
	}
	fputc('\n', fp);
}
1203
/* perf report --header: print core and thread sibling lists.  Each set
 * of strings is packed back to back, NUL-separated. */
static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}
}
1226
1227 static void free_event_desc(struct perf_evsel *events)
1228 {
1229 struct perf_evsel *evsel;
1230
1231 if (!events)
1232 return;
1233
1234 for (evsel = events; evsel->attr.size; evsel++) {
1235 if (evsel->name)
1236 free(evsel->name);
1237 if (evsel->id)
1238 free(evsel->id);
1239 }
1240
1241 free(events);
1242 }
1243
/*
 * Parse the HEADER_EVENT_DESC section written by write_event_desc():
 * u32 nr_events, u32 attr_size, then per event the on-file attr, a u32
 * id count, the name string and the u64 ids.  Returns a calloc'ed array
 * terminated by an entry with attr.size == 0 (free with
 * free_event_desc()), or NULL on any read/alloc failure.
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our attr size: the file's attr may be larger or
	 * smaller than the one this binary was built with */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			/* flag the evsel so sample parsing swaps too */
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		/* events without ids carry no id array */
		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	if (buf)
		free(buf);
	return events;
error:
	if (events)
		free_event_desc(events);
	events = NULL;
	goto out;
}
1338
/* perf report --header: print one line per recorded event with its
 * attr configuration and sample ids. */
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	/* the array is terminated by an entry with attr.size == 0 */
	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		fprintf(fp, "type = %d, config = 0x%"PRIx64
			", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
			evsel->attr.type,
			(u64)evsel->attr.config,
			(u64)evsel->attr.config1,
			(u64)evsel->attr.config2);

		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
			evsel->attr.exclude_user,
			evsel->attr.exclude_kernel);

		fprintf(fp, ", excl_host = %d, excl_guest = %d",
			evsel->attr.exclude_host,
			evsel->attr.exclude_guest);

		fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);

		fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
		fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap);
		fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		fputc('\n', fp);
	}

	free_event_desc(events);
}
1388
1389 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1390 FILE *fp)
1391 {
1392 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1393 }
1394
/*
 * HEADER_NUMA_TOPOLOGY printer: walk the records packed into
 * ph->env.numa_nodes by process_numa_topology() -- each node is encoded as
 * "node:mem_total:mem_free:" followed by a NUL-terminated cpu-list string --
 * and pretty-print per-node memory and cpu information.
 */
static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	u32 nr, c, i;
	char *str, *tmp;
	uint64_t mem_total, mem_free;

	/* nr nodes */
	nr = ph->env.nr_numa_nodes;
	str = ph->env.numa_nodes;

	for (i = 0; i < nr; i++) {
		/* node number */
		c = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		mem_total = strtoull(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		mem_free = strtoull(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			c, mem_total, mem_free);

		str = tmp + 1;
		fprintf(fp, "# node%u cpu list : %s\n", c, str);

		/* records are NUL-separated: step past the cpu-list string */
		str += strlen(str) + 1;
	}
	return;
error:
	fprintf(fp, "# numa topology : not available\n");
}
1435
1436 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1437 {
1438 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1439 }
1440
1441 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1442 int fd __maybe_unused, FILE *fp)
1443 {
1444 fprintf(fp, "# contains samples with branch stack\n");
1445 }
1446
1447 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1448 FILE *fp)
1449 {
1450 const char *delimiter = "# pmu mappings: ";
1451 char *str, *tmp;
1452 u32 pmu_num;
1453 u32 type;
1454
1455 pmu_num = ph->env.nr_pmu_mappings;
1456 if (!pmu_num) {
1457 fprintf(fp, "# pmu mappings: not available\n");
1458 return;
1459 }
1460
1461 str = ph->env.pmu_mappings;
1462
1463 while (pmu_num) {
1464 type = strtoul(str, &tmp, 0);
1465 if (*tmp != ':')
1466 goto error;
1467
1468 str = tmp + 1;
1469 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1470
1471 delimiter = ", ";
1472 str += strlen(str) + 1;
1473 pmu_num--;
1474 }
1475
1476 fprintf(fp, "\n");
1477
1478 if (!pmu_num)
1479 return;
1480 error:
1481 fprintf(fp, "# pmu mappings: unable to read\n");
1482 }
1483
1484 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1485 FILE *fp)
1486 {
1487 struct perf_session *session;
1488 struct perf_evsel *evsel;
1489 u32 nr = 0;
1490
1491 session = container_of(ph, struct perf_session, header);
1492
1493 list_for_each_entry(evsel, &session->evlist->entries, node) {
1494 if (perf_evsel__is_group_leader(evsel) &&
1495 evsel->nr_members > 1) {
1496 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1497 perf_evsel__name(evsel));
1498
1499 nr = evsel->nr_members - 1;
1500 } else if (nr) {
1501 fprintf(fp, ",%s", perf_evsel__name(evsel));
1502
1503 if (--nr == 0)
1504 fprintf(fp, "}\n");
1505 }
1506 }
1507 }
1508
/*
 * Attach the build id carried by a build_id_event to the matching DSO of
 * the right machine (host or guest, selected via bev->pid), creating the
 * DSO entry if needed.
 *
 * Returns 0 on success, -1 when the machine cannot be found/created or the
 * event's cpumode is not one we handle.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	/* pick the dso list (kernel vs user) and kernel type from cpumode */
	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		/*
		 * Kernel dsos carry synthetic bracketed names such as
		 * "[kernel.kallsyms]" -- mark those with the kernel type.
		 */
		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}
1563
1564 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1565 int input, u64 offset, u64 size)
1566 {
1567 struct perf_session *session = container_of(header, struct perf_session, header);
1568 struct {
1569 struct perf_event_header header;
1570 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1571 char filename[0];
1572 } old_bev;
1573 struct build_id_event bev;
1574 char filename[PATH_MAX];
1575 u64 limit = offset + size;
1576
1577 while (offset < limit) {
1578 ssize_t len;
1579
1580 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1581 return -1;
1582
1583 if (header->needs_swap)
1584 perf_event_header__bswap(&old_bev.header);
1585
1586 len = old_bev.header.size - sizeof(old_bev);
1587 if (readn(input, filename, len) != len)
1588 return -1;
1589
1590 bev.header = old_bev.header;
1591
1592 /*
1593 * As the pid is the missing value, we need to fill
1594 * it properly. The header.misc value give us nice hint.
1595 */
1596 bev.pid = HOST_KERNEL_ID;
1597 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1598 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1599 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1600
1601 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1602 __event_process_build_id(&bev, filename, session);
1603
1604 offset += bev.header.size;
1605 }
1606
1607 return 0;
1608 }
1609
1610 static int perf_header__read_build_ids(struct perf_header *header,
1611 int input, u64 offset, u64 size)
1612 {
1613 struct perf_session *session = container_of(header, struct perf_session, header);
1614 struct build_id_event bev;
1615 char filename[PATH_MAX];
1616 u64 limit = offset + size, orig_offset = offset;
1617 int err = -1;
1618
1619 while (offset < limit) {
1620 ssize_t len;
1621
1622 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1623 goto out;
1624
1625 if (header->needs_swap)
1626 perf_event_header__bswap(&bev.header);
1627
1628 len = bev.header.size - sizeof(bev);
1629 if (readn(input, filename, len) != len)
1630 goto out;
1631 /*
1632 * The a1645ce1 changeset:
1633 *
1634 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1635 *
1636 * Added a field to struct build_id_event that broke the file
1637 * format.
1638 *
1639 * Since the kernel build-id is the first entry, process the
1640 * table using the old format if the well known
1641 * '[kernel.kallsyms]' string for the kernel build-id has the
1642 * first 4 characters chopped off (where the pid_t sits).
1643 */
1644 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1645 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1646 return -1;
1647 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1648 }
1649
1650 __event_process_build_id(&bev, filename, session);
1651
1652 offset += bev.header.size;
1653 }
1654 err = 0;
1655 out:
1656 return err;
1657 }
1658
1659 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1660 struct perf_header *ph __maybe_unused,
1661 int fd, void *data)
1662 {
1663 ssize_t ret = trace_report(fd, data, false);
1664 return ret < 0 ? -1 : 0;
1665 }
1666
1667 static int process_build_id(struct perf_file_section *section,
1668 struct perf_header *ph, int fd,
1669 void *data __maybe_unused)
1670 {
1671 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1672 pr_debug("Failed to read buildids, continuing...\n");
1673 return 0;
1674 }
1675
1676 static int process_hostname(struct perf_file_section *section __maybe_unused,
1677 struct perf_header *ph, int fd,
1678 void *data __maybe_unused)
1679 {
1680 ph->env.hostname = do_read_string(fd, ph);
1681 return ph->env.hostname ? 0 : -ENOMEM;
1682 }
1683
1684 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1685 struct perf_header *ph, int fd,
1686 void *data __maybe_unused)
1687 {
1688 ph->env.os_release = do_read_string(fd, ph);
1689 return ph->env.os_release ? 0 : -ENOMEM;
1690 }
1691
1692 static int process_version(struct perf_file_section *section __maybe_unused,
1693 struct perf_header *ph, int fd,
1694 void *data __maybe_unused)
1695 {
1696 ph->env.version = do_read_string(fd, ph);
1697 return ph->env.version ? 0 : -ENOMEM;
1698 }
1699
1700 static int process_arch(struct perf_file_section *section __maybe_unused,
1701 struct perf_header *ph, int fd,
1702 void *data __maybe_unused)
1703 {
1704 ph->env.arch = do_read_string(fd, ph);
1705 return ph->env.arch ? 0 : -ENOMEM;
1706 }
1707
1708 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1709 struct perf_header *ph, int fd,
1710 void *data __maybe_unused)
1711 {
1712 ssize_t ret;
1713 u32 nr;
1714
1715 ret = readn(fd, &nr, sizeof(nr));
1716 if (ret != sizeof(nr))
1717 return -1;
1718
1719 if (ph->needs_swap)
1720 nr = bswap_32(nr);
1721
1722 ph->env.nr_cpus_online = nr;
1723
1724 ret = readn(fd, &nr, sizeof(nr));
1725 if (ret != sizeof(nr))
1726 return -1;
1727
1728 if (ph->needs_swap)
1729 nr = bswap_32(nr);
1730
1731 ph->env.nr_cpus_avail = nr;
1732 return 0;
1733 }
1734
1735 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1736 struct perf_header *ph, int fd,
1737 void *data __maybe_unused)
1738 {
1739 ph->env.cpu_desc = do_read_string(fd, ph);
1740 return ph->env.cpu_desc ? 0 : -ENOMEM;
1741 }
1742
1743 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1744 struct perf_header *ph, int fd,
1745 void *data __maybe_unused)
1746 {
1747 ph->env.cpuid = do_read_string(fd, ph);
1748 return ph->env.cpuid ? 0 : -ENOMEM;
1749 }
1750
1751 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1752 struct perf_header *ph, int fd,
1753 void *data __maybe_unused)
1754 {
1755 uint64_t mem;
1756 ssize_t ret;
1757
1758 ret = readn(fd, &mem, sizeof(mem));
1759 if (ret != sizeof(mem))
1760 return -1;
1761
1762 if (ph->needs_swap)
1763 mem = bswap_64(mem);
1764
1765 ph->env.total_mem = mem;
1766 return 0;
1767 }
1768
1769 static struct perf_evsel *
1770 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1771 {
1772 struct perf_evsel *evsel;
1773
1774 list_for_each_entry(evsel, &evlist->entries, node) {
1775 if (evsel->idx == idx)
1776 return evsel;
1777 }
1778
1779 return NULL;
1780 }
1781
1782 static void
1783 perf_evlist__set_event_name(struct perf_evlist *evlist,
1784 struct perf_evsel *event)
1785 {
1786 struct perf_evsel *evsel;
1787
1788 if (!event->name)
1789 return;
1790
1791 evsel = perf_evlist__find_by_index(evlist, event->idx);
1792 if (!evsel)
1793 return;
1794
1795 if (evsel->name)
1796 return;
1797
1798 evsel->name = strdup(event->name);
1799 }
1800
1801 static int
1802 process_event_desc(struct perf_file_section *section __maybe_unused,
1803 struct perf_header *header, int fd,
1804 void *data __maybe_unused)
1805 {
1806 struct perf_session *session;
1807 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1808
1809 if (!events)
1810 return 0;
1811
1812 session = container_of(header, struct perf_session, header);
1813 for (evsel = events; evsel->attr.size; evsel++)
1814 perf_evlist__set_event_name(session->evlist, evsel);
1815
1816 free_event_desc(events);
1817
1818 return 0;
1819 }
1820
1821 static int process_cmdline(struct perf_file_section *section __maybe_unused,
1822 struct perf_header *ph, int fd,
1823 void *data __maybe_unused)
1824 {
1825 ssize_t ret;
1826 char *str;
1827 u32 nr, i;
1828 struct strbuf sb;
1829
1830 ret = readn(fd, &nr, sizeof(nr));
1831 if (ret != sizeof(nr))
1832 return -1;
1833
1834 if (ph->needs_swap)
1835 nr = bswap_32(nr);
1836
1837 ph->env.nr_cmdline = nr;
1838 strbuf_init(&sb, 128);
1839
1840 for (i = 0; i < nr; i++) {
1841 str = do_read_string(fd, ph);
1842 if (!str)
1843 goto error;
1844
1845 /* include a NULL character at the end */
1846 strbuf_add(&sb, str, strlen(str) + 1);
1847 free(str);
1848 }
1849 ph->env.cmdline = strbuf_detach(&sb, NULL);
1850 return 0;
1851
1852 error:
1853 strbuf_release(&sb);
1854 return -1;
1855 }
1856
/*
 * HEADER_CPU_TOPOLOGY reader: two back-to-back tables, each a u32 count
 * followed by that many length-prefixed strings -- core sibling maps first,
 * then thread sibling maps.  Each table is packed NUL-separated into the
 * env (sibling_cores / sibling_threads).
 */
static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
				struct perf_header *ph, int fd,
				void *data __maybe_unused)
{
	ssize_t ret;
	u32 nr, i;
	char *str;
	struct strbuf sb;

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	ph->env.nr_sibling_cores = nr;
	strbuf_init(&sb, 128);

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		strbuf_add(&sb, str, strlen(str) + 1);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	ph->env.nr_sibling_threads = nr;

	/*
	 * sb is reused here after strbuf_detach() -- assumes detach leaves
	 * the strbuf in a reusable (reinitialized) state; verify against
	 * strbuf implementation.
	 */
	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		strbuf_add(&sb, str, strlen(str) + 1);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
1912
1913 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1914 struct perf_header *ph, int fd,
1915 void *data __maybe_unused)
1916 {
1917 ssize_t ret;
1918 u32 nr, node, i;
1919 char *str;
1920 uint64_t mem_total, mem_free;
1921 struct strbuf sb;
1922
1923 /* nr nodes */
1924 ret = readn(fd, &nr, sizeof(nr));
1925 if (ret != sizeof(nr))
1926 goto error;
1927
1928 if (ph->needs_swap)
1929 nr = bswap_32(nr);
1930
1931 ph->env.nr_numa_nodes = nr;
1932 strbuf_init(&sb, 256);
1933
1934 for (i = 0; i < nr; i++) {
1935 /* node number */
1936 ret = readn(fd, &node, sizeof(node));
1937 if (ret != sizeof(node))
1938 goto error;
1939
1940 ret = readn(fd, &mem_total, sizeof(u64));
1941 if (ret != sizeof(u64))
1942 goto error;
1943
1944 ret = readn(fd, &mem_free, sizeof(u64));
1945 if (ret != sizeof(u64))
1946 goto error;
1947
1948 if (ph->needs_swap) {
1949 node = bswap_32(node);
1950 mem_total = bswap_64(mem_total);
1951 mem_free = bswap_64(mem_free);
1952 }
1953
1954 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1955 node, mem_total, mem_free);
1956
1957 str = do_read_string(fd, ph);
1958 if (!str)
1959 goto error;
1960
1961 /* include a NULL character at the end */
1962 strbuf_add(&sb, str, strlen(str) + 1);
1963 free(str);
1964 }
1965 ph->env.numa_nodes = strbuf_detach(&sb, NULL);
1966 return 0;
1967
1968 error:
1969 strbuf_release(&sb);
1970 return -1;
1971 }
1972
1973 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1974 struct perf_header *ph, int fd,
1975 void *data __maybe_unused)
1976 {
1977 ssize_t ret;
1978 char *name;
1979 u32 pmu_num;
1980 u32 type;
1981 struct strbuf sb;
1982
1983 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1984 if (ret != sizeof(pmu_num))
1985 return -1;
1986
1987 if (ph->needs_swap)
1988 pmu_num = bswap_32(pmu_num);
1989
1990 if (!pmu_num) {
1991 pr_debug("pmu mappings not available\n");
1992 return 0;
1993 }
1994
1995 ph->env.nr_pmu_mappings = pmu_num;
1996 strbuf_init(&sb, 128);
1997
1998 while (pmu_num) {
1999 if (readn(fd, &type, sizeof(type)) != sizeof(type))
2000 goto error;
2001 if (ph->needs_swap)
2002 type = bswap_32(type);
2003
2004 name = do_read_string(fd, ph);
2005 if (!name)
2006 goto error;
2007
2008 strbuf_addf(&sb, "%u:%s", type, name);
2009 /* include a NULL character at the end */
2010 strbuf_add(&sb, "", 1);
2011
2012 free(name);
2013 pmu_num--;
2014 }
2015 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2016 return 0;
2017
2018 error:
2019 strbuf_release(&sb);
2020 return -1;
2021 }
2022
2023 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2024 struct perf_header *ph, int fd,
2025 void *data __maybe_unused)
2026 {
2027 size_t ret = -1;
2028 u32 i, nr, nr_groups;
2029 struct perf_session *session;
2030 struct perf_evsel *evsel, *leader = NULL;
2031 struct group_desc {
2032 char *name;
2033 u32 leader_idx;
2034 u32 nr_members;
2035 } *desc;
2036
2037 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2038 return -1;
2039
2040 if (ph->needs_swap)
2041 nr_groups = bswap_32(nr_groups);
2042
2043 ph->env.nr_groups = nr_groups;
2044 if (!nr_groups) {
2045 pr_debug("group desc not available\n");
2046 return 0;
2047 }
2048
2049 desc = calloc(nr_groups, sizeof(*desc));
2050 if (!desc)
2051 return -1;
2052
2053 for (i = 0; i < nr_groups; i++) {
2054 desc[i].name = do_read_string(fd, ph);
2055 if (!desc[i].name)
2056 goto out_free;
2057
2058 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2059 goto out_free;
2060
2061 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2062 goto out_free;
2063
2064 if (ph->needs_swap) {
2065 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2066 desc[i].nr_members = bswap_32(desc[i].nr_members);
2067 }
2068 }
2069
2070 /*
2071 * Rebuild group relationship based on the group_desc
2072 */
2073 session = container_of(ph, struct perf_session, header);
2074 session->evlist->nr_groups = nr_groups;
2075
2076 i = nr = 0;
2077 list_for_each_entry(evsel, &session->evlist->entries, node) {
2078 if (evsel->idx == (int) desc[i].leader_idx) {
2079 evsel->leader = evsel;
2080 /* {anon_group} is a dummy name */
2081 if (strcmp(desc[i].name, "{anon_group}")) {
2082 evsel->group_name = desc[i].name;
2083 desc[i].name = NULL;
2084 }
2085 evsel->nr_members = desc[i].nr_members;
2086
2087 if (i >= nr_groups || nr > 0) {
2088 pr_debug("invalid group desc\n");
2089 goto out_free;
2090 }
2091
2092 leader = evsel;
2093 nr = evsel->nr_members - 1;
2094 i++;
2095 } else if (nr) {
2096 /* This is a group member */
2097 evsel->leader = leader;
2098
2099 nr--;
2100 }
2101 }
2102
2103 if (i != nr_groups || nr != 0) {
2104 pr_debug("invalid group desc\n");
2105 goto out_free;
2106 }
2107
2108 ret = 0;
2109 out_free:
2110 for (i = 0; i < nr_groups; i++)
2111 free(desc[i].name);
2112 free(desc);
2113
2114 return ret;
2115 }
2116
/*
 * Per-feature vtable: how each optional header feature is written when
 * producing a perf.data file, pretty-printed for the user, and processed
 * when a file is read back.
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	bool full_only;		/* only printed on a full (-I) listing */
};

/* write + print, no process callback */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
/* write + print + process */
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
/* like FEAT_OPP, but only printed on a full listing */
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* indexed by HEADER_* feature id; gaps have all-NULL callbacks */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
};
2158
/* closure threaded through perf_header__process_sections() when printing */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2163
2164 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2165 struct perf_header *ph,
2166 int feat, int fd, void *data)
2167 {
2168 struct header_print_data *hd = data;
2169
2170 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2171 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2172 "%d, continuing...\n", section->offset, feat);
2173 return 0;
2174 }
2175 if (feat >= HEADER_LAST_FEATURE) {
2176 pr_warning("unknown feature %d\n", feat);
2177 return 0;
2178 }
2179 if (!feat_ops[feat].print)
2180 return 0;
2181
2182 if (!feat_ops[feat].full_only || hd->full)
2183 feat_ops[feat].print(ph, fd, hd->fp);
2184 else
2185 fprintf(hd->fp, "# %s info available, use -I to display\n",
2186 feat_ops[feat].name);
2187
2188 return 0;
2189 }
2190
2191 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2192 {
2193 struct header_print_data hd;
2194 struct perf_header *header = &session->header;
2195 int fd = perf_data_file__fd(session->file);
2196 hd.fp = fp;
2197 hd.full = full;
2198
2199 perf_header__process_sections(header, fd, &hd,
2200 perf_file_section__fprintf_info);
2201 return 0;
2202 }
2203
2204 static int do_write_feat(int fd, struct perf_header *h, int type,
2205 struct perf_file_section **p,
2206 struct perf_evlist *evlist)
2207 {
2208 int err;
2209 int ret = 0;
2210
2211 if (perf_header__has_feat(h, type)) {
2212 if (!feat_ops[type].write)
2213 return -1;
2214
2215 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2216
2217 err = feat_ops[type].write(fd, h, evlist);
2218 if (err < 0) {
2219 pr_debug("failed to write feature %d\n", type);
2220
2221 /* undo anything written */
2222 lseek(fd, (*p)->offset, SEEK_SET);
2223
2224 return -1;
2225 }
2226 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2227 (*p)++;
2228 }
2229 return ret;
2230 }
2231
/*
 * Write all enabled feature sections: first skip over the space reserved
 * for the section table at header->feat_offset, write each feature's
 * payload (clearing the feature bit if its writer fails), then seek back
 * and write the table of (offset, size) entries.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* payloads go right after the reserved section table */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2271
2272 int perf_header__write_pipe(int fd)
2273 {
2274 struct perf_pipe_file_header f_header;
2275 int err;
2276
2277 f_header = (struct perf_pipe_file_header){
2278 .magic = PERF_MAGIC,
2279 .size = sizeof(f_header),
2280 };
2281
2282 err = do_write(fd, &f_header, sizeof(f_header));
2283 if (err < 0) {
2284 pr_debug("failed to write perf pipe header\n");
2285 return err;
2286 }
2287
2288 return 0;
2289 }
2290
/*
 * Write the perf.data file header.  Layout, in file order: the fixed
 * perf_file_header (written last, once all offsets are known), the per-event
 * sample-id tables, the perf_file_attr array, the data section, and --
 * at exit only -- the feature sections.  The fd's final position is left at
 * the end of the data section.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* leave room for the fixed header, which is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/* first the per-event sample id arrays, remembering their offsets */
	list_for_each_entry(evsel, &evlist->entries, node) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(fd, 0, SEEK_CUR);

	/* then one perf_file_attr per event, pointing back at its id array */
	list_for_each_entry(evsel, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only valid once all data has been written */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* finally the fixed header at offset 0 */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2366
2367 static int perf_header__getbuffer64(struct perf_header *header,
2368 int fd, void *buf, size_t size)
2369 {
2370 if (readn(fd, buf, size) <= 0)
2371 return -1;
2372
2373 if (header->needs_swap)
2374 mem_bswap_64(buf, size);
2375
2376 return 0;
2377 }
2378
/*
 * Read the feature section table at header->feat_offset and invoke
 * 'process' once per feature bit set in the header, handing it the
 * matching (offset, size) section entry.  Stops at the first callback
 * error.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	/* the table itself is endian-sensitive, swap as an array of u64s */
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/*
	 * Iterate only the features this tool knows about
	 * (HEADER_LAST_FEATURE): bits beyond it sit at the end of the table
	 * and are simply never consumed.
	 */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2417
/*
 * perf_event_attr sizes of the known file-format ABI revisions, used by
 * try_all_file_abis() to probe legacy headers; zero-terminated.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	0,
};
2425
2426 /*
2427 * In the legacy file format, the magic number is not used to encode endianness.
2428 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2429 * on ABI revisions, we need to try all combinations for all endianness to
2430 * detect the endianness.
2431 */
2432 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2433 {
2434 uint64_t ref_size, attr_size;
2435 int i;
2436
2437 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2438 ref_size = attr_file_abi_sizes[i]
2439 + sizeof(struct perf_file_section);
2440 if (hdr_sz != ref_size) {
2441 attr_size = bswap_64(hdr_sz);
2442 if (attr_size != ref_size)
2443 continue;
2444
2445 ph->needs_swap = true;
2446 }
2447 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2448 i,
2449 ph->needs_swap);
2450 return 0;
2451 }
2452 /* could not determine endianness */
2453 return -1;
2454 }
2455
/* size of the pipe-mode header in its only (v0) revision */
#define PERF_PIPE_HDR_VER0	16

/* known pipe-header sizes, probed by try_all_pipe_abis(); zero-terminated */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2462
2463 /*
2464 * In the legacy pipe format, there is an implicit assumption that endiannesss
2465 * between host recording the samples, and host parsing the samples is the
2466 * same. This is not always the case given that the pipe output may always be
2467 * redirected into a file and analyzed on a different machine with possibly a
2468 * different endianness and perf_event ABI revsions in the perf tool itself.
2469 */
2470 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2471 {
2472 u64 attr_size;
2473 int i;
2474
2475 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2476 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2477 attr_size = bswap_64(hdr_sz);
2478 if (attr_size != hdr_sz)
2479 continue;
2480
2481 ph->needs_swap = true;
2482 }
2483 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2484 return 0;
2485 }
2486 return -1;
2487 }
2488
2489 bool is_perf_magic(u64 magic)
2490 {
2491 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2492 || magic == __perf_magic2
2493 || magic == __perf_magic2_sw)
2494 return true;
2495
2496 return false;
2497 }
2498
2499 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2500 bool is_pipe, struct perf_header *ph)
2501 {
2502 int ret;
2503
2504 /* check for legacy format */
2505 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2506 if (ret == 0) {
2507 ph->version = PERF_HEADER_VERSION_1;
2508 pr_debug("legacy perf.data format\n");
2509 if (is_pipe)
2510 return try_all_pipe_abis(hdr_sz, ph);
2511
2512 return try_all_file_abis(hdr_sz, ph);
2513 }
2514 /*
2515 * the new magic number serves two purposes:
2516 * - unique number to identify actual perf.data files
2517 * - encode endianness of file
2518 */
2519
2520 /* check magic number with one endianness */
2521 if (magic == __perf_magic2)
2522 return 0;
2523
2524 /* check magic number with opposite endianness */
2525 if (magic != __perf_magic2_sw)
2526 return -1;
2527
2528 ph->needs_swap = true;
2529 ph->version = PERF_HEADER_VERSION_2;
2530
2531 return 0;
2532 }
2533
/*
 * Read and validate the fixed perf.data header from offset 0 of fd,
 * handling legacy formats, foreign endianness and the variable-size
 * feature bitmap, then publish offsets and features into ph.
 * Returns 0 on success, -1 on a short read or unrecognized format.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap the fixed u64 fields, stopping before the bitmap */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	/* the feature section table sits right after the data section */
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2605
2606 static int perf_file_section__process(struct perf_file_section *section,
2607 struct perf_header *ph,
2608 int feat, int fd, void *data)
2609 {
2610 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2611 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2612 "%d, continuing...\n", section->offset, feat);
2613 return 0;
2614 }
2615
2616 if (feat >= HEADER_LAST_FEATURE) {
2617 pr_debug("unknown feature %d, continuing...\n", feat);
2618 return 0;
2619 }
2620
2621 if (!feat_ops[feat].process)
2622 return 0;
2623
2624 return feat_ops[feat].process(section, ph, fd, data);
2625 }
2626
2627 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2628 struct perf_header *ph, int fd,
2629 bool repipe)
2630 {
2631 ssize_t ret;
2632
2633 ret = readn(fd, header, sizeof(*header));
2634 if (ret <= 0)
2635 return -1;
2636
2637 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2638 pr_debug("endian/magic failed\n");
2639 return -1;
2640 }
2641
2642 if (ph->needs_swap)
2643 header->size = bswap_64(header->size);
2644
2645 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2646 return -1;
2647
2648 return 0;
2649 }
2650
2651 static int perf_header__read_pipe(struct perf_session *session)
2652 {
2653 struct perf_header *header = &session->header;
2654 struct perf_pipe_file_header f_header;
2655
2656 if (perf_file_header__read_pipe(&f_header, header,
2657 perf_data_file__fd(session->file),
2658 session->repipe) < 0) {
2659 pr_debug("incompatible file format\n");
2660 return -EINVAL;
2661 }
2662
2663 return 0;
2664 }
2665
2666 static int read_attr(int fd, struct perf_header *ph,
2667 struct perf_file_attr *f_attr)
2668 {
2669 struct perf_event_attr *attr = &f_attr->attr;
2670 size_t sz, left;
2671 size_t our_sz = sizeof(f_attr->attr);
2672 ssize_t ret;
2673
2674 memset(f_attr, 0, sizeof(*f_attr));
2675
2676 /* read minimal guaranteed structure */
2677 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2678 if (ret <= 0) {
2679 pr_debug("cannot read %d bytes of header attr\n",
2680 PERF_ATTR_SIZE_VER0);
2681 return -1;
2682 }
2683
2684 /* on file perf_event_attr size */
2685 sz = attr->size;
2686
2687 if (ph->needs_swap)
2688 sz = bswap_32(sz);
2689
2690 if (sz == 0) {
2691 /* assume ABI0 */
2692 sz = PERF_ATTR_SIZE_VER0;
2693 } else if (sz > our_sz) {
2694 pr_debug("file uses a more recent and unsupported ABI"
2695 " (%zu bytes extra)\n", sz - our_sz);
2696 return -1;
2697 }
2698 /* what we have not yet read and that we know about */
2699 left = sz - PERF_ATTR_SIZE_VER0;
2700 if (left) {
2701 void *ptr = attr;
2702 ptr += PERF_ATTR_SIZE_VER0;
2703
2704 ret = readn(fd, ptr, left);
2705 }
2706 /* read perf_file_section, ids are read in caller */
2707 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2708
2709 return ret <= 0 ? -1 : 0;
2710 }
2711
2712 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2713 struct pevent *pevent)
2714 {
2715 struct event_format *event;
2716 char bf[128];
2717
2718 /* already prepared */
2719 if (evsel->tp_format)
2720 return 0;
2721
2722 if (pevent == NULL) {
2723 pr_debug("broken or missing trace data\n");
2724 return -1;
2725 }
2726
2727 event = pevent_find_event(pevent, evsel->attr.config);
2728 if (event == NULL)
2729 return -1;
2730
2731 if (!evsel->name) {
2732 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2733 evsel->name = strdup(bf);
2734 if (evsel->name == NULL)
2735 return -1;
2736 }
2737
2738 evsel->tp_format = event;
2739 return 0;
2740 }
2741
2742 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2743 struct pevent *pevent)
2744 {
2745 struct perf_evsel *pos;
2746
2747 list_for_each_entry(pos, &evlist->entries, node) {
2748 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2749 perf_evsel__prepare_tracepoint_event(pos, pevent))
2750 return -1;
2751 }
2752
2753 return 0;
2754 }
2755
/*
 * Read and parse the header of a seekable perf.data file, building
 * session->evlist from the on-file attrs and their sample ids, then
 * process all feature sections and prepare tracepoint events.
 *
 * Pipe-mode input is delegated to perf_header__read_pipe().
 * Returns 0 on success, -EINVAL/-ENOMEM/-errno on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	/* A pipe has no seekable file header. */
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		/* Remember where the next attr entry starts. */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		/* The id table for this attr lives elsewhere in the file. */
		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* Seek back to the attr entry following this one. */
		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->pevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2853
2854 int perf_event__synthesize_attr(struct perf_tool *tool,
2855 struct perf_event_attr *attr, u32 ids, u64 *id,
2856 perf_event__handler_t process)
2857 {
2858 union perf_event *ev;
2859 size_t size;
2860 int err;
2861
2862 size = sizeof(struct perf_event_attr);
2863 size = PERF_ALIGN(size, sizeof(u64));
2864 size += sizeof(struct perf_event_header);
2865 size += ids * sizeof(u64);
2866
2867 ev = malloc(size);
2868
2869 if (ev == NULL)
2870 return -ENOMEM;
2871
2872 ev->attr.attr = *attr;
2873 memcpy(ev->attr.id, id, ids * sizeof(u64));
2874
2875 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2876 ev->attr.header.size = (u16)size;
2877
2878 if (ev->attr.header.size == size)
2879 err = process(tool, ev, NULL, NULL);
2880 else
2881 err = -E2BIG;
2882
2883 free(ev);
2884
2885 return err;
2886 }
2887
2888 int perf_event__synthesize_attrs(struct perf_tool *tool,
2889 struct perf_session *session,
2890 perf_event__handler_t process)
2891 {
2892 struct perf_evsel *evsel;
2893 int err = 0;
2894
2895 list_for_each_entry(evsel, &session->evlist->entries, node) {
2896 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
2897 evsel->id, process);
2898 if (err) {
2899 pr_debug("failed to create perf header attribute\n");
2900 return err;
2901 }
2902 }
2903
2904 return err;
2905 }
2906
2907 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
2908 union perf_event *event,
2909 struct perf_evlist **pevlist)
2910 {
2911 u32 i, ids, n_ids;
2912 struct perf_evsel *evsel;
2913 struct perf_evlist *evlist = *pevlist;
2914
2915 if (evlist == NULL) {
2916 *pevlist = evlist = perf_evlist__new();
2917 if (evlist == NULL)
2918 return -ENOMEM;
2919 }
2920
2921 evsel = perf_evsel__new(&event->attr.attr);
2922 if (evsel == NULL)
2923 return -ENOMEM;
2924
2925 perf_evlist__add(evlist, evsel);
2926
2927 ids = event->header.size;
2928 ids -= (void *)&event->attr.id - (void *)event;
2929 n_ids = ids / sizeof(u64);
2930 /*
2931 * We don't have the cpu and thread maps on the header, so
2932 * for allocating the perf_sample_id table we fake 1 cpu and
2933 * hattr->ids threads.
2934 */
2935 if (perf_evsel__alloc_id(evsel, 1, n_ids))
2936 return -ENOMEM;
2937
2938 for (i = 0; i < n_ids; i++) {
2939 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2940 }
2941
2942 symbol_conf.nr_events = evlist->nr_entries;
2943
2944 return 0;
2945 }
2946
2947 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2948 struct perf_evlist *evlist,
2949 perf_event__handler_t process)
2950 {
2951 union perf_event ev;
2952 struct tracing_data *tdata;
2953 ssize_t size = 0, aligned_size = 0, padding;
2954 int err __maybe_unused = 0;
2955
2956 /*
2957 * We are going to store the size of the data followed
2958 * by the data contents. Since the fd descriptor is a pipe,
2959 * we cannot seek back to store the size of the data once
2960 * we know it. Instead we:
2961 *
2962 * - write the tracing data to the temp file
2963 * - get/write the data size to pipe
2964 * - write the tracing data from the temp file
2965 * to the pipe
2966 */
2967 tdata = tracing_data_get(&evlist->entries, fd, true);
2968 if (!tdata)
2969 return -1;
2970
2971 memset(&ev, 0, sizeof(ev));
2972
2973 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2974 size = tdata->size;
2975 aligned_size = PERF_ALIGN(size, sizeof(u64));
2976 padding = aligned_size - size;
2977 ev.tracing_data.header.size = sizeof(ev.tracing_data);
2978 ev.tracing_data.size = aligned_size;
2979
2980 process(tool, &ev, NULL, NULL);
2981
2982 /*
2983 * The put function will copy all the tracing data
2984 * stored in temp file to the pipe.
2985 */
2986 tracing_data_put(tdata);
2987
2988 write_padded(fd, NULL, 0, padding);
2989
2990 return aligned_size;
2991 }
2992
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data
 * that follows the event header in the file, consume (and optionally
 * repipe) the u64-alignment padding, and prepare tracepoint events.
 *
 * Returns the number of bytes consumed (size_read + padding) on success,
 * -1 on read/repipe failure or when the consumed size does not match the
 * size announced in the event.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	/* trace_report() parses the payload and returns bytes consumed */
	size_read = trace_report(fd, &session->pevent,
				 session->repipe);
	/* the writer padded the payload to a u64 boundary; skip that */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		/* forward the padding too so the output stays aligned */
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* consumed bytes must match the size announced in the event */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->pevent);

	return size_read + padding;
}
3032
3033 int perf_event__synthesize_build_id(struct perf_tool *tool,
3034 struct dso *pos, u16 misc,
3035 perf_event__handler_t process,
3036 struct machine *machine)
3037 {
3038 union perf_event ev;
3039 size_t len;
3040 int err = 0;
3041
3042 if (!pos->hit)
3043 return err;
3044
3045 memset(&ev, 0, sizeof(ev));
3046
3047 len = pos->long_name_len + 1;
3048 len = PERF_ALIGN(len, NAME_ALIGN);
3049 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3050 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3051 ev.build_id.header.misc = misc;
3052 ev.build_id.pid = machine->pid;
3053 ev.build_id.header.size = sizeof(ev.build_id) + len;
3054 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3055
3056 err = process(tool, &ev, NULL, machine);
3057
3058 return err;
3059 }
3060
/*
 * Handle a PERF_RECORD_HEADER_BUILD_ID event by recording the build-id
 * via __event_process_build_id() (defined elsewhere in this file).
 * Always returns 0.
 */
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
3070
/*
 * Disable build-id caching for this run by setting the file-scope
 * no_buildid_cache flag (consulted by the build-id write path elsewhere
 * in this file).
 */
void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}
This page took 0.18922 seconds and 5 git commands to generate.