perf tools: Back [vdso] DSO with real data
[deliverable/linux.git] tools/perf/util/header.c
b8f46c5a
XG
1#define _FILE_OFFSET_BITS 64
2
a9072bc0 3#include "util.h"
7c6a1c65 4#include <sys/types.h>
ba21594c 5#include <byteswap.h>
7c6a1c65
PZ
6#include <unistd.h>
7#include <stdio.h>
8#include <stdlib.h>
8671dab9 9#include <linux/list.h>
ba21594c 10#include <linux/kernel.h>
b1e5a9be 11#include <linux/bitops.h>
fbe96f29 12#include <sys/utsname.h>
7c6a1c65 13
361c99a6 14#include "evlist.h"
a91e5431 15#include "evsel.h"
7c6a1c65 16#include "header.h"
03456a15
FW
17#include "../perf.h"
18#include "trace-event.h"
301a0b02 19#include "session.h"
8671dab9 20#include "symbol.h"
4778d2e4 21#include "debug.h"
fbe96f29 22#include "cpumap.h"
50a9667c 23#include "pmu.h"
7dbf4dcf 24#include "vdso.h"
7c6a1c65 25
a1ac1d3c
SE
26static bool no_buildid_cache = false;
27
db146f06
RR
28static int trace_event_count;
29static struct perf_trace_event_type *trace_events;
8755a8f2 30
fbe96f29
SE
31static u32 header_argc;
32static const char **header_argv;
33
ae99fb2c 34int perf_header__push_event(u64 id, const char *name)
8755a8f2 35{
f3054c77
UD
36 struct perf_trace_event_type *nevents;
37
ec60a3fe 38 if (strlen(name) > MAX_EVENT_NAME)
6beba7ad 39 pr_warning("Event %s will be truncated\n", name);
8755a8f2 40
db146f06 41 nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events));
f3054c77
UD
42 if (nevents == NULL)
43 return -ENOMEM;
db146f06 44 trace_events = nevents;
ae99fb2c 45
db146f06
RR
46 memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type));
47 trace_events[trace_event_count].event_id = id;
48 strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1);
49 trace_event_count++;
ae99fb2c 50 return 0;
8755a8f2
AV
51}
52
53char *perf_header__find_event(u64 id)
54{
55 int i;
db146f06
RR
56 for (i = 0 ; i < trace_event_count; i++) {
57 if (trace_events[i].event_id == id)
58 return trace_events[i].name;
8755a8f2
AV
59 }
60 return NULL;
61}
62
73323f54
SE
63/*
64 * magic2 = "PERFILE2"
65 * is kept as a numerical value so that the endianness
66 * of the writer determines the memory layout. That way we are able
67 * to detect the endianness when reading the perf.data file
68 * back.
69 *
70 * we also check for the legacy (PERFFILE) format.
71 */
72static const char *__perf_magic1 = "PERFFILE";
73static const u64 __perf_magic2 = 0x32454c4946524550ULL;
74static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
7c6a1c65 75
73323f54 76#define PERF_MAGIC __perf_magic2
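/*
 * Worked example of the encoding above: the ASCII bytes of "PERFILE2"
 * are 0x50 0x45 0x52 0x46 0x49 0x4c 0x45 0x32.  Read as a u64 on a
 * little-endian host they give 0x32454c4946524550 (__perf_magic2); the
 * same bytes read on a big-endian host give 0x50455246494c4532
 * (__perf_magic2_sw).  A reader that finds the swapped constant knows
 * the file was written with the opposite byte order and sets
 * ph->needs_swap (see check_magic_endian() below).
 */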
7c6a1c65 77
7c6a1c65 78struct perf_file_attr {
cdd6c482 79 struct perf_event_attr attr;
7c6a1c65
PZ
80 struct perf_file_section ids;
81};
82
1c0b04d1 83void perf_header__set_feat(struct perf_header *header, int feat)
8d06367f 84{
1c0b04d1 85 set_bit(feat, header->adds_features);
8d06367f
ACM
86}
87
1c0b04d1 88void perf_header__clear_feat(struct perf_header *header, int feat)
baa2f6ce 89{
1c0b04d1 90 clear_bit(feat, header->adds_features);
baa2f6ce
ACM
91}
92
1c0b04d1 93bool perf_header__has_feat(const struct perf_header *header, int feat)
8d06367f 94{
1c0b04d1 95 return test_bit(feat, header->adds_features);
8d06367f
ACM
96}
97
3726cc75 98static int do_write(int fd, const void *buf, size_t size)
7c6a1c65
PZ
99{
100 while (size) {
101 int ret = write(fd, buf, size);
102
103 if (ret < 0)
d5eed904 104 return -errno;
7c6a1c65
PZ
105
106 size -= ret;
107 buf += ret;
108 }
3726cc75
ACM
109
110 return 0;
7c6a1c65
PZ
111}
112
f92cb24c
ACM
113#define NAME_ALIGN 64
114
115static int write_padded(int fd, const void *bf, size_t count,
116 size_t count_aligned)
117{
118 static const char zero_buf[NAME_ALIGN];
119 int err = do_write(fd, bf, count);
120
121 if (!err)
122 err = do_write(fd, zero_buf, count_aligned - count);
123
124 return err;
125}
126
fbe96f29
SE
127static int do_write_string(int fd, const char *str)
128{
129 u32 len, olen;
130 int ret;
131
132 olen = strlen(str) + 1;
9ac3e487 133 len = PERF_ALIGN(olen, NAME_ALIGN);
fbe96f29
SE
134
135 /* write len, incl. \0 */
136 ret = do_write(fd, &len, sizeof(len));
137 if (ret < 0)
138 return ret;
139
140 return write_padded(fd, str, olen, len);
141}
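/*
 * Small example of the string format produced above: writing "perf"
 * gives olen = 5 (including the trailing NUL), rounded up to len = 64
 * (NAME_ALIGN).  The u32 value 64 is written first, then the 5 real
 * bytes plus 59 bytes of zero padding, which is why do_read_string()
 * below trusts len but not strlen() of the returned buffer.
 */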
142
143static char *do_read_string(int fd, struct perf_header *ph)
144{
145 ssize_t sz, ret;
146 u32 len;
147 char *buf;
148
149 sz = read(fd, &len, sizeof(len));
150 if (sz < (ssize_t)sizeof(len))
151 return NULL;
152
153 if (ph->needs_swap)
154 len = bswap_32(len);
155
156 buf = malloc(len);
157 if (!buf)
158 return NULL;
159
160 ret = read(fd, buf, len);
161 if (ret == (ssize_t)len) {
162 /*
163 * strings are padded by zeroes
164 * thus the actual strlen of buf
165 * may be less than len
166 */
167 return buf;
168 }
169
170 free(buf);
171 return NULL;
172}
173
174int
175perf_header__set_cmdline(int argc, const char **argv)
176{
177 int i;
178
56e6f602
DA
179 /*
180 * If header_argv has already been set, do not override it.
181 * This allows a command to set the cmdline, parse args and
182 * then call another builtin function that implements a
183 * command -- e.g., cmd_kvm calling cmd_record.
184 */
185 if (header_argv)
186 return 0;
187
fbe96f29
SE
188 header_argc = (u32)argc;
189
190 /* do not include NULL termination */
191 header_argv = calloc(argc, sizeof(char *));
192 if (!header_argv)
193 return -ENOMEM;
194
195 /*
196 * must copy argv contents because it gets moved
197 * around during option parsing
198 */
199 for (i = 0; i < argc ; i++)
200 header_argv[i] = argv[i];
201
202 return 0;
203}
204
1b549504
RR
205#define dsos__for_each_with_build_id(pos, head) \
206 list_for_each_entry(pos, head, node) \
207 if (!pos->has_build_id) \
208 continue; \
209 else
210
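/*
 * Note on the macro above: the trailing "continue; else" makes the
 * caller's statement (or braced block) bind as the else branch of the
 * if, so an invocation expands roughly to
 *
 *	list_for_each_entry(pos, head, node)
 *		if (!pos->has_build_id)
 *			continue;
 *		else {
 *			... caller's code ...
 *		}
 *
 * and entries without a build id are skipped without extra braces at
 * every call site.
 */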
7dbf4dcf
JO
211static int write_buildid(char *name, size_t name_len, u8 *build_id,
212 pid_t pid, u16 misc, int fd)
213{
214 int err;
215 struct build_id_event b;
216 size_t len;
217
218 len = name_len + 1;
219 len = PERF_ALIGN(len, NAME_ALIGN);
220
221 memset(&b, 0, sizeof(b));
222 memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
223 b.pid = pid;
224 b.header.misc = misc;
225 b.header.size = sizeof(b) + len;
226
227 err = do_write(fd, &b, sizeof(b));
228 if (err < 0)
229 return err;
230
231 return write_padded(fd, name, name_len + 1, len);
232}
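/*
 * Illustrative record produced by write_buildid() (numbers are only an
 * example): for the name "/lib/libc-2.15.so" (17 characters), len =
 * PERF_ALIGN(18, NAME_ALIGN) = 64, so b.header.size = sizeof(b) + 64
 * and the name is emitted as 18 real bytes followed by 46 zero bytes.
 */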
233
1b549504
RR
234static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
235 u16 misc, int fd)
236{
237 struct dso *pos;
238
239 dsos__for_each_with_build_id(pos, head) {
240 int err;
7dbf4dcf
JO
241 char *name;
242 size_t name_len;
1b549504
RR
243
244 if (!pos->hit)
245 continue;
7dbf4dcf
JO
246
247 if (is_vdso_map(pos->short_name)) {
248 name = (char *) VDSO__MAP_NAME;
249 name_len = sizeof(VDSO__MAP_NAME) + 1;
250 } else {
251 name = pos->long_name;
252 name_len = pos->long_name_len + 1;
253 }
254
255 err = write_buildid(name, name_len, pos->build_id,
256 pid, misc, fd);
257 if (err)
1b549504
RR
258 return err;
259 }
260
261 return 0;
262}
263
264static int machine__write_buildid_table(struct machine *machine, int fd)
265{
266 int err;
267 u16 kmisc = PERF_RECORD_MISC_KERNEL,
268 umisc = PERF_RECORD_MISC_USER;
269
270 if (!machine__is_host(machine)) {
271 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
272 umisc = PERF_RECORD_MISC_GUEST_USER;
273 }
274
275 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
276 kmisc, fd);
277 if (err == 0)
278 err = __dsos__write_buildid_table(&machine->user_dsos,
279 machine->pid, umisc, fd);
280 return err;
281}
282
283static int dsos__write_buildid_table(struct perf_header *header, int fd)
284{
285 struct perf_session *session = container_of(header,
286 struct perf_session, header);
287 struct rb_node *nd;
288 int err = machine__write_buildid_table(&session->host_machine, fd);
289
290 if (err)
291 return err;
292
293 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
294 struct machine *pos = rb_entry(nd, struct machine, rb_node);
295 err = machine__write_buildid_table(pos, fd);
296 if (err)
297 break;
298 }
299 return err;
300}
301
302int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
7dbf4dcf 303 const char *name, bool is_kallsyms, bool is_vdso)
1b549504
RR
304{
305 const size_t size = PATH_MAX;
306 char *realname, *filename = zalloc(size),
307 *linkname = zalloc(size), *targetname;
308 int len, err = -1;
7dbf4dcf 309 bool slash = is_kallsyms || is_vdso;
1b549504
RR
310
311 if (is_kallsyms) {
312 if (symbol_conf.kptr_restrict) {
313 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
314 return 0;
315 }
7dbf4dcf 316 realname = (char *) name;
1b549504
RR
317 } else
318 realname = realpath(name, NULL);
319
320 if (realname == NULL || filename == NULL || linkname == NULL)
321 goto out_free;
322
e7f01d1e 323 len = scnprintf(filename, size, "%s%s%s",
7dbf4dcf
JO
324 debugdir, slash ? "/" : "",
325 is_vdso ? VDSO__MAP_NAME : realname);
1b549504
RR
326 if (mkdir_p(filename, 0755))
327 goto out_free;
328
afda0f94 329 snprintf(filename + len, size - len, "/%s", sbuild_id);
1b549504
RR
330
331 if (access(filename, F_OK)) {
332 if (is_kallsyms) {
333 if (copyfile("/proc/kallsyms", filename))
334 goto out_free;
335 } else if (link(realname, filename) && copyfile(name, filename))
336 goto out_free;
337 }
338
e7f01d1e 339 len = scnprintf(linkname, size, "%s/.build-id/%.2s",
1b549504
RR
340 debugdir, sbuild_id);
341
342 if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
343 goto out_free;
344
345 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
346 targetname = filename + strlen(debugdir) - 5;
347 memcpy(targetname, "../..", 5);
348
349 if (symlink(targetname, linkname) == 0)
350 err = 0;
351out_free:
352 if (!is_kallsyms)
353 free(realname);
354 free(filename);
355 free(linkname);
356 return err;
357}
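/*
 * Resulting cache layout, as a sketch (assuming the default buildid_dir
 * of ~/.debug and a build id beginning with "ab"):
 *
 *	~/.debug/lib/libc-2.15.so/ab<rest>           (hard link or copy)
 *	~/.debug/.build-id/ab/<rest> -> ../../lib/libc-2.15.so/ab<rest>
 *
 * The "../.." written into targetname above keeps the symlink relative,
 * so the whole cache directory can be relocated as one unit.
 */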
358
359static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
360 const char *name, const char *debugdir,
7dbf4dcf 361 bool is_kallsyms, bool is_vdso)
1b549504
RR
362{
363 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
364
365 build_id__sprintf(build_id, build_id_size, sbuild_id);
366
7dbf4dcf
JO
367 return build_id_cache__add_s(sbuild_id, debugdir, name,
368 is_kallsyms, is_vdso);
1b549504
RR
369}
370
371int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
372{
373 const size_t size = PATH_MAX;
374 char *filename = zalloc(size),
375 *linkname = zalloc(size);
376 int err = -1;
377
378 if (filename == NULL || linkname == NULL)
379 goto out_free;
380
381 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
382 debugdir, sbuild_id, sbuild_id + 2);
383
384 if (access(linkname, F_OK))
385 goto out_free;
386
387 if (readlink(linkname, filename, size - 1) < 0)
388 goto out_free;
389
390 if (unlink(linkname))
391 goto out_free;
392
393 /*
394 * Since the link is relative, we must make it absolute:
395 */
396 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
397 debugdir, sbuild_id, filename);
398
399 if (unlink(linkname))
400 goto out_free;
401
402 err = 0;
403out_free:
404 free(filename);
405 free(linkname);
406 return err;
407}
408
409static int dso__cache_build_id(struct dso *dso, const char *debugdir)
410{
411 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
7dbf4dcf 412 bool is_vdso = is_vdso_map(dso->short_name);
1b549504
RR
413
414 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
7dbf4dcf
JO
415 dso->long_name, debugdir,
416 is_kallsyms, is_vdso);
1b549504
RR
417}
418
419static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
420{
421 struct dso *pos;
422 int err = 0;
423
424 dsos__for_each_with_build_id(pos, head)
425 if (dso__cache_build_id(pos, debugdir))
426 err = -1;
427
428 return err;
429}
430
431static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
432{
433 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
434 ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
435 return ret;
436}
437
438static int perf_session__cache_build_ids(struct perf_session *session)
439{
440 struct rb_node *nd;
441 int ret;
442 char debugdir[PATH_MAX];
443
444 snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
445
446 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
447 return -1;
448
449 ret = machine__cache_build_ids(&session->host_machine, debugdir);
450
451 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
452 struct machine *pos = rb_entry(nd, struct machine, rb_node);
453 ret |= machine__cache_build_ids(pos, debugdir);
454 }
455 return ret ? -1 : 0;
456}
457
458static bool machine__read_build_ids(struct machine *machine, bool with_hits)
459{
460 bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
461 ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
462 return ret;
463}
464
465static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
466{
467 struct rb_node *nd;
468 bool ret = machine__read_build_ids(&session->host_machine, with_hits);
469
470 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
471 struct machine *pos = rb_entry(nd, struct machine, rb_node);
472 ret |= machine__read_build_ids(pos, with_hits);
473 }
474
475 return ret;
476}
477
2eeaaa09 478static int write_tracing_data(int fd, struct perf_header *h __used,
fbe96f29
SE
479 struct perf_evlist *evlist)
480{
481 return read_tracing_data(fd, &evlist->entries);
482}
483
484
485static int write_build_id(int fd, struct perf_header *h,
486 struct perf_evlist *evlist __used)
487{
488 struct perf_session *session;
489 int err;
490
491 session = container_of(h, struct perf_session, header);
492
e20960c0
RR
493 if (!perf_session__read_build_ids(session, true))
494 return -1;
495
fbe96f29
SE
496 err = dsos__write_buildid_table(h, fd);
497 if (err < 0) {
498 pr_debug("failed to write buildid table\n");
499 return err;
500 }
501 if (!no_buildid_cache)
502 perf_session__cache_build_ids(session);
503
504 return 0;
505}
506
507static int write_hostname(int fd, struct perf_header *h __used,
508 struct perf_evlist *evlist __used)
509{
510 struct utsname uts;
511 int ret;
512
513 ret = uname(&uts);
514 if (ret < 0)
515 return -1;
516
517 return do_write_string(fd, uts.nodename);
518}
519
520static int write_osrelease(int fd, struct perf_header *h __used,
521 struct perf_evlist *evlist __used)
522{
523 struct utsname uts;
524 int ret;
525
526 ret = uname(&uts);
527 if (ret < 0)
528 return -1;
529
530 return do_write_string(fd, uts.release);
531}
532
533static int write_arch(int fd, struct perf_header *h __used,
534 struct perf_evlist *evlist __used)
535{
536 struct utsname uts;
537 int ret;
538
539 ret = uname(&uts);
540 if (ret < 0)
541 return -1;
542
543 return do_write_string(fd, uts.machine);
544}
545
546static int write_version(int fd, struct perf_header *h __used,
547 struct perf_evlist *evlist __used)
548{
549 return do_write_string(fd, perf_version_string);
550}
551
552static int write_cpudesc(int fd, struct perf_header *h __used,
553 struct perf_evlist *evlist __used)
554{
555#ifndef CPUINFO_PROC
556#define CPUINFO_PROC NULL
557#endif
558 FILE *file;
559 char *buf = NULL;
560 char *s, *p;
561 const char *search = CPUINFO_PROC;
562 size_t len = 0;
563 int ret = -1;
564
565 if (!search)
566 return -1;
567
568 file = fopen("/proc/cpuinfo", "r");
569 if (!file)
570 return -1;
571
572 while (getline(&buf, &len, file) > 0) {
573 ret = strncmp(buf, search, strlen(search));
574 if (!ret)
575 break;
576 }
577
578 if (ret)
579 goto done;
580
581 s = buf;
582
583 p = strchr(buf, ':');
584 if (p && *(p+1) == ' ' && *(p+2))
585 s = p + 2;
586 p = strchr(s, '\n');
587 if (p)
588 *p = '\0';
589
590 /* squash extra space characters (branding string) */
591 p = s;
592 while (*p) {
593 if (isspace(*p)) {
594 char *r = p + 1;
595 char *q = r;
596 *p = ' ';
597 while (*q && isspace(*q))
598 q++;
599 if (q != (p+1))
600 while ((*r++ = *q++));
601 }
602 p++;
603 }
604 ret = do_write_string(fd, s);
605done:
606 free(buf);
607 fclose(file);
608 return ret;
609}
610
611static int write_nrcpus(int fd, struct perf_header *h __used,
612 struct perf_evlist *evlist __used)
613{
614 long nr;
615 u32 nrc, nra;
616 int ret;
617
618 nr = sysconf(_SC_NPROCESSORS_CONF);
619 if (nr < 0)
620 return -1;
621
622 nrc = (u32)(nr & UINT_MAX);
623
624 nr = sysconf(_SC_NPROCESSORS_ONLN);
625 if (nr < 0)
626 return -1;
627
628 nra = (u32)(nr & UINT_MAX);
629
630 ret = do_write(fd, &nrc, sizeof(nrc));
631 if (ret < 0)
632 return ret;
633
634 return do_write(fd, &nra, sizeof(nra));
635}
636
637static int write_event_desc(int fd, struct perf_header *h __used,
638 struct perf_evlist *evlist)
639{
6606f873 640 struct perf_evsel *evsel;
74ba9e11 641 u32 nre, nri, sz;
fbe96f29
SE
642 int ret;
643
74ba9e11 644 nre = evlist->nr_entries;
fbe96f29
SE
645
646 /*
647 * write number of events
648 */
649 ret = do_write(fd, &nre, sizeof(nre));
650 if (ret < 0)
651 return ret;
652
653 /*
654 * size of perf_event_attr struct
655 */
6606f873 656 sz = (u32)sizeof(evsel->attr);
fbe96f29
SE
657 ret = do_write(fd, &sz, sizeof(sz));
658 if (ret < 0)
659 return ret;
660
6606f873 661 list_for_each_entry(evsel, &evlist->entries, node) {
fbe96f29 662
6606f873 663 ret = do_write(fd, &evsel->attr, sz);
fbe96f29
SE
664 if (ret < 0)
665 return ret;
666 /*
667 * write the number of unique ids per event;
668 * there is one id per instance of an event
669 *
670 * copy into nri to be independent of the
671 * type used for ids
672 */
6606f873 673 nri = evsel->ids;
fbe96f29
SE
674 ret = do_write(fd, &nri, sizeof(nri));
675 if (ret < 0)
676 return ret;
677
678 /*
679 * write event string as passed on cmdline
680 */
6606f873 681 ret = do_write_string(fd, perf_evsel__name(evsel));
fbe96f29
SE
682 if (ret < 0)
683 return ret;
684 /*
685 * write unique ids for this event
686 */
6606f873 687 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
fbe96f29
SE
688 if (ret < 0)
689 return ret;
690 }
691 return 0;
692}
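/*
 * Shape of the HEADER_EVENT_DESC section written above, summarised in
 * the style of the pmu_mappings comment further down:
 *
 *	u32 nre;                number of events
 *	u32 sz;                 sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;
 *		u32 nri;        number of ids for this event
 *		char name[];    do_write_string() format (len + padded bytes)
 *		u64 ids[nri];
 *	} events[nre];
 *
 * read_event_desc() below parses exactly this layout back.
 */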
693
694static int write_cmdline(int fd, struct perf_header *h __used,
695 struct perf_evlist *evlist __used)
696{
697 char buf[MAXPATHLEN];
698 char proc[32];
699 u32 i, n;
700 int ret;
701
702 /*
703 * actual path to the perf binary
704 */
705 sprintf(proc, "/proc/%d/exe", getpid());
706 ret = readlink(proc, buf, sizeof(buf));
707 if (ret <= 0)
708 return -1;
709
710 /* readlink() does not add null termination */
711 buf[ret] = '\0';
712
713 /* account for binary path */
714 n = header_argc + 1;
715
716 ret = do_write(fd, &n, sizeof(n));
717 if (ret < 0)
718 return ret;
719
720 ret = do_write_string(fd, buf);
721 if (ret < 0)
722 return ret;
723
724 for (i = 0 ; i < header_argc; i++) {
725 ret = do_write_string(fd, header_argv[i]);
726 if (ret < 0)
727 return ret;
728 }
729 return 0;
730}
731
732#define CORE_SIB_FMT \
733 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
734#define THRD_SIB_FMT \
735 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
736
737struct cpu_topo {
738 u32 core_sib;
739 u32 thread_sib;
740 char **core_siblings;
741 char **thread_siblings;
742};
743
744static int build_cpu_topo(struct cpu_topo *tp, int cpu)
745{
746 FILE *fp;
747 char filename[MAXPATHLEN];
748 char *buf = NULL, *p;
749 size_t len = 0;
750 u32 i = 0;
751 int ret = -1;
752
753 sprintf(filename, CORE_SIB_FMT, cpu);
754 fp = fopen(filename, "r");
755 if (!fp)
756 return -1;
757
758 if (getline(&buf, &len, fp) <= 0)
759 goto done;
760
761 fclose(fp);
762
763 p = strchr(buf, '\n');
764 if (p)
765 *p = '\0';
766
767 for (i = 0; i < tp->core_sib; i++) {
768 if (!strcmp(buf, tp->core_siblings[i]))
769 break;
770 }
771 if (i == tp->core_sib) {
772 tp->core_siblings[i] = buf;
773 tp->core_sib++;
774 buf = NULL;
775 len = 0;
776 }
777
778 sprintf(filename, THRD_SIB_FMT, cpu);
779 fp = fopen(filename, "r");
780 if (!fp)
781 goto done;
782
783 if (getline(&buf, &len, fp) <= 0)
784 goto done;
785
786 p = strchr(buf, '\n');
787 if (p)
788 *p = '\0';
789
790 for (i = 0; i < tp->thread_sib; i++) {
791 if (!strcmp(buf, tp->thread_siblings[i]))
792 break;
793 }
794 if (i == tp->thread_sib) {
795 tp->thread_siblings[i] = buf;
796 tp->thread_sib++;
797 buf = NULL;
798 }
799 ret = 0;
800done:
801 if (fp)
802 fclose(fp);
803 free(buf);
804 return ret;
805}
806
807static void free_cpu_topo(struct cpu_topo *tp)
808{
809 u32 i;
810
811 if (!tp)
812 return;
813
814 for (i = 0 ; i < tp->core_sib; i++)
815 free(tp->core_siblings[i]);
816
817 for (i = 0 ; i < tp->thread_sib; i++)
818 free(tp->thread_siblings[i]);
819
820 free(tp);
821}
822
823static struct cpu_topo *build_cpu_topology(void)
824{
825 struct cpu_topo *tp;
826 void *addr;
827 u32 nr, i;
828 size_t sz;
829 long ncpus;
830 int ret = -1;
831
832 ncpus = sysconf(_SC_NPROCESSORS_CONF);
833 if (ncpus < 0)
834 return NULL;
835
836 nr = (u32)(ncpus & UINT_MAX);
837
838 sz = nr * sizeof(char *);
839
840 addr = calloc(1, sizeof(*tp) + 2 * sz);
841 if (!addr)
842 return NULL;
843
844 tp = addr;
845
846 addr += sizeof(*tp);
847 tp->core_siblings = addr;
848 addr += sz;
849 tp->thread_siblings = addr;
850
851 for (i = 0; i < nr; i++) {
852 ret = build_cpu_topo(tp, i);
853 if (ret < 0)
854 break;
855 }
856 if (ret) {
857 free_cpu_topo(tp);
858 tp = NULL;
859 }
860 return tp;
861}
862
863static int write_cpu_topology(int fd, struct perf_header *h __used,
864 struct perf_evlist *evlist __used)
865{
866 struct cpu_topo *tp;
867 u32 i;
868 int ret;
869
870 tp = build_cpu_topology();
871 if (!tp)
872 return -1;
873
874 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
875 if (ret < 0)
876 goto done;
877
878 for (i = 0; i < tp->core_sib; i++) {
879 ret = do_write_string(fd, tp->core_siblings[i]);
880 if (ret < 0)
881 goto done;
882 }
883 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
884 if (ret < 0)
885 goto done;
886
887 for (i = 0; i < tp->thread_sib; i++) {
888 ret = do_write_string(fd, tp->thread_siblings[i]);
889 if (ret < 0)
890 break;
891 }
892done:
893 free_cpu_topo(tp);
894 return ret;
895}
896
897
898
899static int write_total_mem(int fd, struct perf_header *h __used,
900 struct perf_evlist *evlist __used)
901{
902 char *buf = NULL;
903 FILE *fp;
904 size_t len = 0;
905 int ret = -1, n;
906 uint64_t mem;
907
908 fp = fopen("/proc/meminfo", "r");
909 if (!fp)
910 return -1;
911
912 while (getline(&buf, &len, fp) > 0) {
913 ret = strncmp(buf, "MemTotal:", 9);
914 if (!ret)
915 break;
916 }
917 if (!ret) {
918 n = sscanf(buf, "%*s %"PRIu64, &mem);
919 if (n == 1)
920 ret = do_write(fd, &mem, sizeof(mem));
921 }
922 free(buf);
923 fclose(fp);
924 return ret;
925}
926
927static int write_topo_node(int fd, int node)
928{
929 char str[MAXPATHLEN];
930 char field[32];
931 char *buf = NULL, *p;
932 size_t len = 0;
933 FILE *fp;
934 u64 mem_total, mem_free, mem;
935 int ret = -1;
936
937 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
938 fp = fopen(str, "r");
939 if (!fp)
940 return -1;
941
942 while (getline(&buf, &len, fp) > 0) {
943 /* skip over invalid lines */
944 if (!strchr(buf, ':'))
945 continue;
946 if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
947 goto done;
948 if (!strcmp(field, "MemTotal:"))
949 mem_total = mem;
950 if (!strcmp(field, "MemFree:"))
951 mem_free = mem;
952 }
953
954 fclose(fp);
955
956 ret = do_write(fd, &mem_total, sizeof(u64));
957 if (ret)
958 goto done;
959
960 ret = do_write(fd, &mem_free, sizeof(u64));
961 if (ret)
962 goto done;
963
964 ret = -1;
965 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
966
967 fp = fopen(str, "r");
968 if (!fp)
969 goto done;
970
971 if (getline(&buf, &len, fp) <= 0)
972 goto done;
973
974 p = strchr(buf, '\n');
975 if (p)
976 *p = '\0';
977
978 ret = do_write_string(fd, buf);
979done:
980 free(buf);
981 fclose(fp);
982 return ret;
983}
984
985static int write_numa_topology(int fd, struct perf_header *h __used,
986 struct perf_evlist *evlist __used)
987{
988 char *buf = NULL;
989 size_t len = 0;
990 FILE *fp;
991 struct cpu_map *node_map = NULL;
992 char *c;
993 u32 nr, i, j;
994 int ret = -1;
995
996 fp = fopen("/sys/devices/system/node/online", "r");
997 if (!fp)
998 return -1;
999
1000 if (getline(&buf, &len, fp) <= 0)
1001 goto done;
1002
1003 c = strchr(buf, '\n');
1004 if (c)
1005 *c = '\0';
1006
1007 node_map = cpu_map__new(buf);
1008 if (!node_map)
1009 goto done;
1010
1011 nr = (u32)node_map->nr;
1012
1013 ret = do_write(fd, &nr, sizeof(nr));
1014 if (ret < 0)
1015 goto done;
1016
1017 for (i = 0; i < nr; i++) {
1018 j = (u32)node_map->map[i];
1019 ret = do_write(fd, &j, sizeof(j));
1020 if (ret < 0)
1021 break;
1022
1023 ret = write_topo_node(fd, i);
1024 if (ret < 0)
1025 break;
1026 }
1027done:
1028 free(buf);
1029 fclose(fp);
1030 free(node_map);
1031 return ret;
1032}
1033
50a9667c
RR
1034/*
1035 * File format:
1036 *
1037 * struct pmu_mappings {
1038 * u32 pmu_num;
1039 * struct pmu_map {
1040 * u32 type;
1041 * char name[];
1042 * }[pmu_num];
1043 * };
1044 */
1045
1046static int write_pmu_mappings(int fd, struct perf_header *h __used,
1047 struct perf_evlist *evlist __used)
1048{
1049 struct perf_pmu *pmu = NULL;
1050 off_t offset = lseek(fd, 0, SEEK_CUR);
1051 __u32 pmu_num = 0;
1052
1053 /* write real pmu_num later */
1054 do_write(fd, &pmu_num, sizeof(pmu_num));
1055
1056 while ((pmu = perf_pmu__scan(pmu))) {
1057 if (!pmu->name)
1058 continue;
1059 pmu_num++;
1060 do_write(fd, &pmu->type, sizeof(pmu->type));
1061 do_write_string(fd, pmu->name);
1062 }
1063
1064 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
1065 /* discard all */
1066 lseek(fd, offset, SEEK_SET);
1067 return -1;
1068 }
1069
1070 return 0;
1071}
1072
fbe96f29
SE
1073/*
1074 * default get_cpuid(): nothing gets recorded
1075 * actual implementation must be in arch/$(ARCH)/util/header.c
1076 */
1077int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
1078{
1079 return -1;
1080}
1081
1082static int write_cpuid(int fd, struct perf_header *h __used,
1083 struct perf_evlist *evlist __used)
1084{
1085 char buffer[64];
1086 int ret;
1087
1088 ret = get_cpuid(buffer, sizeof(buffer));
1089 if (!ret)
1090 goto write_it;
1091
1092 return -1;
1093write_it:
1094 return do_write_string(fd, buffer);
1095}
1096
330aa675
SE
1097static int write_branch_stack(int fd __used, struct perf_header *h __used,
1098 struct perf_evlist *evlist __used)
1099{
1100 return 0;
1101}
1102
fbe96f29
SE
1103static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
1104{
1105 char *str = do_read_string(fd, ph);
1106 fprintf(fp, "# hostname : %s\n", str);
1107 free(str);
1108}
1109
1110static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
1111{
1112 char *str = do_read_string(fd, ph);
1113 fprintf(fp, "# os release : %s\n", str);
1114 free(str);
1115}
1116
1117static void print_arch(struct perf_header *ph, int fd, FILE *fp)
1118{
1119 char *str = do_read_string(fd, ph);
1120 fprintf(fp, "# arch : %s\n", str);
1121 free(str);
1122}
1123
1124static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
1125{
1126 char *str = do_read_string(fd, ph);
1127 fprintf(fp, "# cpudesc : %s\n", str);
1128 free(str);
1129}
1130
1131static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
1132{
1133 ssize_t ret;
1134 u32 nr;
1135
1136 ret = read(fd, &nr, sizeof(nr));
1137 if (ret != (ssize_t)sizeof(nr))
1138 nr = -1; /* interpreted as error */
1139
1140 if (ph->needs_swap)
1141 nr = bswap_32(nr);
1142
1143 fprintf(fp, "# nrcpus online : %u\n", nr);
1144
1145 ret = read(fd, &nr, sizeof(nr));
1146 if (ret != (ssize_t)sizeof(nr))
1147 nr = -1; /* interpreted as error */
1148
1149 if (ph->needs_swap)
1150 nr = bswap_32(nr);
1151
1152 fprintf(fp, "# nrcpus avail : %u\n", nr);
1153}
1154
1155static void print_version(struct perf_header *ph, int fd, FILE *fp)
1156{
1157 char *str = do_read_string(fd, ph);
1158 fprintf(fp, "# perf version : %s\n", str);
1159 free(str);
1160}
1161
1162static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
1163{
1164 ssize_t ret;
1165 char *str;
1166 u32 nr, i;
1167
1168 ret = read(fd, &nr, sizeof(nr));
1169 if (ret != (ssize_t)sizeof(nr))
1170 return;
1171
1172 if (ph->needs_swap)
1173 nr = bswap_32(nr);
1174
1175 fprintf(fp, "# cmdline : ");
1176
1177 for (i = 0; i < nr; i++) {
1178 str = do_read_string(fd, ph);
1179 fprintf(fp, "%s ", str);
1180 free(str);
1181 }
1182 fputc('\n', fp);
1183}
1184
1185static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
1186{
1187 ssize_t ret;
1188 u32 nr, i;
1189 char *str;
1190
1191 ret = read(fd, &nr, sizeof(nr));
1192 if (ret != (ssize_t)sizeof(nr))
1193 return;
1194
1195 if (ph->needs_swap)
1196 nr = bswap_32(nr);
1197
1198 for (i = 0; i < nr; i++) {
1199 str = do_read_string(fd, ph);
1200 fprintf(fp, "# sibling cores : %s\n", str);
1201 free(str);
1202 }
1203
1204 ret = read(fd, &nr, sizeof(nr));
1205 if (ret != (ssize_t)sizeof(nr))
1206 return;
1207
1208 if (ph->needs_swap)
1209 nr = bswap_32(nr);
1210
1211 for (i = 0; i < nr; i++) {
1212 str = do_read_string(fd, ph);
1213 fprintf(fp, "# sibling threads : %s\n", str);
1214 free(str);
1215 }
1216}
1217
4e1b9c67 1218static void free_event_desc(struct perf_evsel *events)
fbe96f29 1219{
4e1b9c67
RR
1220 struct perf_evsel *evsel;
1221
1222 if (!events)
1223 return;
1224
1225 for (evsel = events; evsel->attr.size; evsel++) {
1226 if (evsel->name)
1227 free(evsel->name);
1228 if (evsel->id)
1229 free(evsel->id);
1230 }
1231
1232 free(events);
1233}
1234
1235static struct perf_evsel *
1236read_event_desc(struct perf_header *ph, int fd)
1237{
1238 struct perf_evsel *evsel, *events = NULL;
1239 u64 *id;
fbe96f29 1240 void *buf = NULL;
62db9068
SE
1241 u32 nre, sz, nr, i, j;
1242 ssize_t ret;
1243 size_t msz;
fbe96f29
SE
1244
1245 /* number of events */
1246 ret = read(fd, &nre, sizeof(nre));
1247 if (ret != (ssize_t)sizeof(nre))
1248 goto error;
1249
1250 if (ph->needs_swap)
1251 nre = bswap_32(nre);
1252
1253 ret = read(fd, &sz, sizeof(sz));
1254 if (ret != (ssize_t)sizeof(sz))
1255 goto error;
1256
1257 if (ph->needs_swap)
1258 sz = bswap_32(sz);
1259
62db9068 1260 /* buffer to hold the on-file attr struct */
fbe96f29
SE
1261 buf = malloc(sz);
1262 if (!buf)
1263 goto error;
1264
4e1b9c67
RR
1265 /* the last event terminates with evsel->attr.size == 0: */
1266 events = calloc(nre + 1, sizeof(*events));
1267 if (!events)
1268 goto error;
1269
1270 msz = sizeof(evsel->attr);
9fafd98f 1271 if (sz < msz)
fbe96f29
SE
1272 msz = sz;
1273
4e1b9c67
RR
1274 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1275 evsel->idx = i;
fbe96f29 1276
62db9068
SE
1277 /*
1278 * must read entire on-file attr struct to
1279 * sync up with layout.
1280 */
fbe96f29
SE
1281 ret = read(fd, buf, sz);
1282 if (ret != (ssize_t)sz)
1283 goto error;
1284
1285 if (ph->needs_swap)
1286 perf_event__attr_swap(buf);
1287
4e1b9c67 1288 memcpy(&evsel->attr, buf, msz);
fbe96f29
SE
1289
1290 ret = read(fd, &nr, sizeof(nr));
1291 if (ret != (ssize_t)sizeof(nr))
1292 goto error;
1293
1294 if (ph->needs_swap)
1295 nr = bswap_32(nr);
1296
4e1b9c67
RR
1297 evsel->name = do_read_string(fd, ph);
1298
1299 if (!nr)
1300 continue;
1301
1302 id = calloc(nr, sizeof(*id));
1303 if (!id)
1304 goto error;
1305 evsel->ids = nr;
1306 evsel->id = id;
1307
1308 for (j = 0 ; j < nr; j++) {
1309 ret = read(fd, id, sizeof(*id));
1310 if (ret != (ssize_t)sizeof(*id))
1311 goto error;
1312 if (ph->needs_swap)
1313 *id = bswap_64(*id);
1314 id++;
1315 }
1316 }
1317out:
1318 if (buf)
1319 free(buf);
1320 return events;
1321error:
1322 if (events)
1323 free_event_desc(events);
1324 events = NULL;
1325 goto out;
1326}
1327
1328static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1329{
1330 struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1331 u32 j;
1332 u64 *id;
1333
1334 if (!events) {
1335 fprintf(fp, "# event desc: not available or unable to read\n");
1336 return;
1337 }
1338
1339 for (evsel = events; evsel->attr.size; evsel++) {
1340 fprintf(fp, "# event : name = %s, ", evsel->name);
fbe96f29
SE
1341
1342 fprintf(fp, "type = %d, config = 0x%"PRIx64
1343 ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
4e1b9c67
RR
1344 evsel->attr.type,
1345 (u64)evsel->attr.config,
1346 (u64)evsel->attr.config1,
1347 (u64)evsel->attr.config2);
fbe96f29
SE
1348
1349 fprintf(fp, ", excl_usr = %d, excl_kern = %d",
4e1b9c67
RR
1350 evsel->attr.exclude_user,
1351 evsel->attr.exclude_kernel);
fbe96f29 1352
78b961ff 1353 fprintf(fp, ", excl_host = %d, excl_guest = %d",
4e1b9c67
RR
1354 evsel->attr.exclude_host,
1355 evsel->attr.exclude_guest);
78b961ff 1356
4e1b9c67 1357 fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
78b961ff 1358
4e1b9c67 1359 if (evsel->ids) {
fbe96f29 1360 fprintf(fp, ", id = {");
4e1b9c67
RR
1361 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1362 if (j)
1363 fputc(',', fp);
1364 fprintf(fp, " %"PRIu64, *id);
1365 }
fbe96f29 1366 fprintf(fp, " }");
4e1b9c67
RR
1367 }
1368
fbe96f29
SE
1369 fputc('\n', fp);
1370 }
4e1b9c67
RR
1371
1372 free_event_desc(events);
fbe96f29
SE
1373}
1374
1375static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
1376{
1377 uint64_t mem;
1378 ssize_t ret;
1379
1380 ret = read(fd, &mem, sizeof(mem));
1381 if (ret != sizeof(mem))
1382 goto error;
1383
1384 if (h->needs_swap)
1385 mem = bswap_64(mem);
1386
1387 fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
1388 return;
1389error:
1390 fprintf(fp, "# total memory : unknown\n");
1391}
1392
1393static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
1394{
1395 ssize_t ret;
1396 u32 nr, c, i;
1397 char *str;
1398 uint64_t mem_total, mem_free;
1399
1400 /* nr nodes */
1401 ret = read(fd, &nr, sizeof(nr));
1402 if (ret != (ssize_t)sizeof(nr))
1403 goto error;
1404
1405 if (h->needs_swap)
1406 nr = bswap_32(nr);
1407
1408 for (i = 0; i < nr; i++) {
1409
1410 /* node number */
1411 ret = read(fd, &c, sizeof(c));
1412 if (ret != (ssize_t)sizeof(c))
1413 goto error;
1414
1415 if (h->needs_swap)
1416 c = bswap_32(c);
1417
1418 ret = read(fd, &mem_total, sizeof(u64));
1419 if (ret != sizeof(u64))
1420 goto error;
1421
1422 ret = read(fd, &mem_free, sizeof(u64));
1423 if (ret != sizeof(u64))
1424 goto error;
1425
1426 if (h->needs_swap) {
1427 mem_total = bswap_64(mem_total);
1428 mem_free = bswap_64(mem_free);
1429 }
1430
1431 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1432 " free = %"PRIu64" kB\n",
1433 c,
1434 mem_total,
1435 mem_free);
1436
1437 str = do_read_string(fd, h);
1438 fprintf(fp, "# node%u cpu list : %s\n", c, str);
1439 free(str);
1440 }
1441 return;
1442error:
1443 fprintf(fp, "# numa topology : not available\n");
1444}
1445
1446static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
1447{
1448 char *str = do_read_string(fd, ph);
1449 fprintf(fp, "# cpuid : %s\n", str);
1450 free(str);
1451}
1452
330aa675
SE
1453static void print_branch_stack(struct perf_header *ph __used, int fd __used,
1454 FILE *fp)
1455{
1456 fprintf(fp, "# contains samples with branch stack\n");
1457}
1458
50a9667c
RR
1459static void print_pmu_mappings(struct perf_header *ph, int fd, FILE *fp)
1460{
1461 const char *delimiter = "# pmu mappings: ";
1462 char *name;
1463 int ret;
1464 u32 pmu_num;
1465 u32 type;
1466
1467 ret = read(fd, &pmu_num, sizeof(pmu_num));
1468 if (ret != sizeof(pmu_num))
1469 goto error;
1470
be4a2ded
NK
1471 if (ph->needs_swap)
1472 pmu_num = bswap_32(pmu_num);
1473
50a9667c
RR
1474 if (!pmu_num) {
1475 fprintf(fp, "# pmu mappings: not available\n");
1476 return;
1477 }
1478
1479 while (pmu_num) {
1480 if (read(fd, &type, sizeof(type)) != sizeof(type))
1481 break;
be4a2ded
NK
1482 if (ph->needs_swap)
1483 type = bswap_32(type);
1484
50a9667c
RR
1485 name = do_read_string(fd, ph);
1486 if (!name)
1487 break;
1488 pmu_num--;
1489 fprintf(fp, "%s%s = %" PRIu32, delimiter, name, type);
1490 free(name);
1491 delimiter = ", ";
1492 }
1493
1494 fprintf(fp, "\n");
1495
1496 if (!pmu_num)
1497 return;
1498error:
1499 fprintf(fp, "# pmu mappings: unable to read\n");
1500}
1501
08d95bd2
RR
1502static int __event_process_build_id(struct build_id_event *bev,
1503 char *filename,
1504 struct perf_session *session)
1505{
1506 int err = -1;
1507 struct list_head *head;
1508 struct machine *machine;
1509 u16 misc;
1510 struct dso *dso;
1511 enum dso_kernel_type dso_type;
1512
1513 machine = perf_session__findnew_machine(session, bev->pid);
1514 if (!machine)
1515 goto out;
1516
1517 misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1518
1519 switch (misc) {
1520 case PERF_RECORD_MISC_KERNEL:
1521 dso_type = DSO_TYPE_KERNEL;
1522 head = &machine->kernel_dsos;
1523 break;
1524 case PERF_RECORD_MISC_GUEST_KERNEL:
1525 dso_type = DSO_TYPE_GUEST_KERNEL;
1526 head = &machine->kernel_dsos;
1527 break;
1528 case PERF_RECORD_MISC_USER:
1529 case PERF_RECORD_MISC_GUEST_USER:
1530 dso_type = DSO_TYPE_USER;
1531 head = &machine->user_dsos;
1532 break;
1533 default:
1534 goto out;
1535 }
1536
1537 dso = __dsos__findnew(head, filename);
1538 if (dso != NULL) {
1539 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1540
1541 dso__set_build_id(dso, &bev->build_id);
1542
1543 if (filename[0] == '[')
1544 dso->kernel = dso_type;
1545
1546 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1547 sbuild_id);
1548 pr_debug("build id event received for %s: %s\n",
1549 dso->long_name, sbuild_id);
1550 }
1551
1552 err = 0;
1553out:
1554 return err;
1555}
1556
1557static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1558 int input, u64 offset, u64 size)
1559{
1560 struct perf_session *session = container_of(header, struct perf_session, header);
1561 struct {
1562 struct perf_event_header header;
9ac3e487 1563 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
08d95bd2
RR
1564 char filename[0];
1565 } old_bev;
1566 struct build_id_event bev;
1567 char filename[PATH_MAX];
1568 u64 limit = offset + size;
1569
1570 while (offset < limit) {
1571 ssize_t len;
1572
1573 if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1574 return -1;
1575
1576 if (header->needs_swap)
1577 perf_event_header__bswap(&old_bev.header);
1578
1579 len = old_bev.header.size - sizeof(old_bev);
1580 if (read(input, filename, len) != len)
1581 return -1;
1582
1583 bev.header = old_bev.header;
1584
1585 /*
1586 * As the pid is the missing value, we need to fill
1587 * it properly. The header.misc value gives us a nice hint.
1588 */
1589 bev.pid = HOST_KERNEL_ID;
1590 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1591 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1592 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1593
1594 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1595 __event_process_build_id(&bev, filename, session);
1596
1597 offset += bev.header.size;
1598 }
1599
1600 return 0;
1601}
1602
1603static int perf_header__read_build_ids(struct perf_header *header,
1604 int input, u64 offset, u64 size)
1605{
1606 struct perf_session *session = container_of(header, struct perf_session, header);
1607 struct build_id_event bev;
1608 char filename[PATH_MAX];
1609 u64 limit = offset + size, orig_offset = offset;
1610 int err = -1;
1611
1612 while (offset < limit) {
1613 ssize_t len;
1614
1615 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
1616 goto out;
1617
1618 if (header->needs_swap)
1619 perf_event_header__bswap(&bev.header);
1620
1621 len = bev.header.size - sizeof(bev);
1622 if (read(input, filename, len) != len)
1623 goto out;
1624 /*
1625 * The a1645ce1 changeset:
1626 *
1627 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1628 *
1629 * Added a field to struct build_id_event that broke the file
1630 * format.
1631 *
1632 * Since the kernel build-id is the first entry, process the
1633 * table using the old format if the well known
1634 * '[kernel.kallsyms]' string for the kernel build-id has the
1635 * first 4 characters chopped off (where the pid_t sits).
1636 */
1637 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1638 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1639 return -1;
1640 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1641 }
1642
1643 __event_process_build_id(&bev, filename, session);
1644
1645 offset += bev.header.size;
1646 }
1647 err = 0;
1648out:
1649 return err;
1650}
1651
2eeaaa09 1652static int process_tracing_data(struct perf_file_section *section __unused,
f1c67db7 1653 struct perf_header *ph __unused,
da378962 1654 int feat __unused, int fd, void *data)
f1c67db7 1655{
da378962 1656 trace_report(fd, data, false);
f1c67db7
RR
1657 return 0;
1658}
1659
1660static int process_build_id(struct perf_file_section *section,
1661 struct perf_header *ph,
da378962 1662 int feat __unused, int fd, void *data __used)
f1c67db7
RR
1663{
1664 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1665 pr_debug("Failed to read buildids, continuing...\n");
1666 return 0;
1667}
1668
7c2f7afd
RR
1669static struct perf_evsel *
1670perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1671{
1672 struct perf_evsel *evsel;
1673
1674 list_for_each_entry(evsel, &evlist->entries, node) {
1675 if (evsel->idx == idx)
1676 return evsel;
1677 }
1678
1679 return NULL;
1680}
1681
1682static void
1683perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event)
1684{
1685 struct perf_evsel *evsel;
1686
1687 if (!event->name)
1688 return;
1689
1690 evsel = perf_evlist__find_by_index(evlist, event->idx);
1691 if (!evsel)
1692 return;
1693
1694 if (evsel->name)
1695 return;
1696
1697 evsel->name = strdup(event->name);
1698}
1699
1700static int
1701process_event_desc(struct perf_file_section *section __unused,
1702 struct perf_header *header, int feat __unused, int fd,
1703 void *data __used)
1704{
1705 struct perf_session *session = container_of(header, struct perf_session, header);
1706 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1707
1708 if (!events)
1709 return 0;
1710
1711 for (evsel = events; evsel->attr.size; evsel++)
1712 perf_evlist__set_event_name(session->evlist, evsel);
1713
1714 free_event_desc(events);
1715
1716 return 0;
1717}
1718
fbe96f29
SE
1719struct feature_ops {
1720 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
1721 void (*print)(struct perf_header *h, int fd, FILE *fp);
f1c67db7 1722 int (*process)(struct perf_file_section *section,
da378962 1723 struct perf_header *h, int feat, int fd, void *data);
fbe96f29
SE
1724 const char *name;
1725 bool full_only;
1726};
1727
8cdfa78a
RR
1728#define FEAT_OPA(n, func) \
1729 [n] = { .name = #n, .write = write_##func, .print = print_##func }
f1c67db7
RR
1730#define FEAT_OPP(n, func) \
1731 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1732 .process = process_##func }
8cdfa78a 1733#define FEAT_OPF(n, func) \
f1c67db7
RR
1734 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1735 .full_only = true }
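/*
 * For example, FEAT_OPP(HEADER_BUILD_ID, build_id) expands to
 *
 *	[HEADER_BUILD_ID] = { .name = "HEADER_BUILD_ID",
 *			      .write = write_build_id,
 *			      .print = print_build_id,
 *			      .process = process_build_id },
 *
 * and since print_build_id is defined to NULL just below, that feature
 * is written and processed but never printed.
 */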
8cdfa78a
RR
1736
1737/* feature_ops not implemented: */
2eeaaa09
SE
1738#define print_tracing_data NULL
1739#define print_build_id NULL
fbe96f29
SE
1740
1741static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2eeaaa09 1742 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
f1c67db7 1743 FEAT_OPP(HEADER_BUILD_ID, build_id),
8cdfa78a
RR
1744 FEAT_OPA(HEADER_HOSTNAME, hostname),
1745 FEAT_OPA(HEADER_OSRELEASE, osrelease),
1746 FEAT_OPA(HEADER_VERSION, version),
1747 FEAT_OPA(HEADER_ARCH, arch),
1748 FEAT_OPA(HEADER_NRCPUS, nrcpus),
1749 FEAT_OPA(HEADER_CPUDESC, cpudesc),
1750 FEAT_OPA(HEADER_CPUID, cpuid),
1751 FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
7c2f7afd 1752 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
8cdfa78a
RR
1753 FEAT_OPA(HEADER_CMDLINE, cmdline),
1754 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
1755 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
330aa675 1756 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
50a9667c 1757 FEAT_OPA(HEADER_PMU_MAPPINGS, pmu_mappings),
fbe96f29
SE
1758};
1759
1760struct header_print_data {
1761 FILE *fp;
1762 bool full; /* extended list of headers */
1763};
1764
1765static int perf_file_section__fprintf_info(struct perf_file_section *section,
1766 struct perf_header *ph,
1767 int feat, int fd, void *data)
1768{
1769 struct header_print_data *hd = data;
1770
1771 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
1772 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1773 "%d, continuing...\n", section->offset, feat);
1774 return 0;
1775 }
b1e5a9be 1776 if (feat >= HEADER_LAST_FEATURE) {
fbe96f29 1777 pr_warning("unknown feature %d\n", feat);
f7a8a133 1778 return 0;
fbe96f29
SE
1779 }
1780 if (!feat_ops[feat].print)
1781 return 0;
1782
1783 if (!feat_ops[feat].full_only || hd->full)
1784 feat_ops[feat].print(ph, fd, hd->fp);
1785 else
1786 fprintf(hd->fp, "# %s info available, use -I to display\n",
1787 feat_ops[feat].name);
1788
1789 return 0;
1790}
1791
1792int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
1793{
1794 struct header_print_data hd;
1795 struct perf_header *header = &session->header;
1796 int fd = session->fd;
1797 hd.fp = fp;
1798 hd.full = full;
1799
1800 perf_header__process_sections(header, fd, &hd,
1801 perf_file_section__fprintf_info);
1802 return 0;
1803}
1804
fbe96f29
SE
1805static int do_write_feat(int fd, struct perf_header *h, int type,
1806 struct perf_file_section **p,
1807 struct perf_evlist *evlist)
1808{
1809 int err;
1810 int ret = 0;
1811
1812 if (perf_header__has_feat(h, type)) {
b1e5a9be
RR
1813 if (!feat_ops[type].write)
1814 return -1;
fbe96f29
SE
1815
1816 (*p)->offset = lseek(fd, 0, SEEK_CUR);
1817
1818 err = feat_ops[type].write(fd, h, evlist);
1819 if (err < 0) {
1820 pr_debug("failed to write feature %d\n", type);
1821
1822 /* undo anything written */
1823 lseek(fd, (*p)->offset, SEEK_SET);
1824
1825 return -1;
1826 }
1827 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
1828 (*p)++;
1829 }
1830 return ret;
1831}
1832
1c0b04d1 1833static int perf_header__adds_write(struct perf_header *header,
361c99a6 1834 struct perf_evlist *evlist, int fd)
2ba08250 1835{
9e827dd0 1836 int nr_sections;
fbe96f29 1837 struct perf_file_section *feat_sec, *p;
9e827dd0
FW
1838 int sec_size;
1839 u64 sec_start;
b1e5a9be 1840 int feat;
fbe96f29 1841 int err;
9e827dd0 1842
1c0b04d1 1843 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
9e827dd0 1844 if (!nr_sections)
d5eed904 1845 return 0;
9e827dd0 1846
fbe96f29 1847 feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
d5eed904
ACM
1848 if (feat_sec == NULL)
1849 return -ENOMEM;
9e827dd0
FW
1850
1851 sec_size = sizeof(*feat_sec) * nr_sections;
1852
1c0b04d1 1853 sec_start = header->data_offset + header->data_size;
f887f301 1854 lseek(fd, sec_start + sec_size, SEEK_SET);
2ba08250 1855
b1e5a9be
RR
1856 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1857 if (do_write_feat(fd, header, feat, &p, evlist))
1858 perf_header__clear_feat(header, feat);
1859 }
9e827dd0 1860
f887f301 1861 lseek(fd, sec_start, SEEK_SET);
fbe96f29
SE
1862 /*
1863 * may write more than needed due to a dropped feature, but
1864 * this is okay, the reader will skip the missing entries
1865 */
d5eed904
ACM
1866 err = do_write(fd, feat_sec, sec_size);
1867 if (err < 0)
1868 pr_debug("failed to write feature section\n");
9e827dd0 1869 free(feat_sec);
d5eed904 1870 return err;
9e827dd0 1871}
2ba08250 1872
8dc58101
TZ
1873int perf_header__write_pipe(int fd)
1874{
1875 struct perf_pipe_file_header f_header;
1876 int err;
1877
1878 f_header = (struct perf_pipe_file_header){
1879 .magic = PERF_MAGIC,
1880 .size = sizeof(f_header),
1881 };
1882
1883 err = do_write(fd, &f_header, sizeof(f_header));
1884 if (err < 0) {
1885 pr_debug("failed to write perf pipe header\n");
1886 return err;
1887 }
1888
1889 return 0;
1890}
1891
a91e5431
ACM
1892int perf_session__write_header(struct perf_session *session,
1893 struct perf_evlist *evlist,
1894 int fd, bool at_exit)
7c6a1c65
PZ
1895{
1896 struct perf_file_header f_header;
1897 struct perf_file_attr f_attr;
1c0b04d1 1898 struct perf_header *header = &session->header;
6606f873 1899 struct perf_evsel *evsel, *pair = NULL;
a91e5431 1900 int err;
7c6a1c65
PZ
1901
1902 lseek(fd, sizeof(f_header), SEEK_SET);
1903
a91e5431 1904 if (session->evlist != evlist)
0c21f736 1905 pair = perf_evlist__first(session->evlist);
7c6a1c65 1906
6606f873
RR
1907 list_for_each_entry(evsel, &evlist->entries, node) {
1908 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
1909 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
d5eed904 1910 if (err < 0) {
a91e5431 1911out_err_write:
d5eed904
ACM
1912 pr_debug("failed to write perf header\n");
1913 return err;
1914 }
a91e5431
ACM
1915 if (session->evlist != evlist) {
1916 err = do_write(fd, pair->id, pair->ids * sizeof(u64));
1917 if (err < 0)
1918 goto out_err_write;
6606f873 1919 evsel->ids += pair->ids;
0c21f736 1920 pair = perf_evsel__next(pair);
a91e5431 1921 }
7c6a1c65
PZ
1922 }
1923
1c0b04d1 1924 header->attr_offset = lseek(fd, 0, SEEK_CUR);
7c6a1c65 1925
6606f873 1926 list_for_each_entry(evsel, &evlist->entries, node) {
7c6a1c65 1927 f_attr = (struct perf_file_attr){
6606f873 1928 .attr = evsel->attr,
7c6a1c65 1929 .ids = {
6606f873
RR
1930 .offset = evsel->id_offset,
1931 .size = evsel->ids * sizeof(u64),
7c6a1c65
PZ
1932 }
1933 };
d5eed904
ACM
1934 err = do_write(fd, &f_attr, sizeof(f_attr));
1935 if (err < 0) {
1936 pr_debug("failed to write perf header attribute\n");
1937 return err;
1938 }
7c6a1c65
PZ
1939 }
1940
1c0b04d1 1941 header->event_offset = lseek(fd, 0, SEEK_CUR);
db146f06
RR
1942 header->event_size = trace_event_count * sizeof(struct perf_trace_event_type);
1943 if (trace_events) {
1944 err = do_write(fd, trace_events, header->event_size);
d5eed904
ACM
1945 if (err < 0) {
1946 pr_debug("failed to write perf header events\n");
1947 return err;
1948 }
1949 }
8755a8f2 1950
1c0b04d1 1951 header->data_offset = lseek(fd, 0, SEEK_CUR);
7c6a1c65 1952
d5eed904 1953 if (at_exit) {
1c0b04d1 1954 err = perf_header__adds_write(header, evlist, fd);
d5eed904
ACM
1955 if (err < 0)
1956 return err;
1957 }
9e827dd0 1958
7c6a1c65
PZ
1959 f_header = (struct perf_file_header){
1960 .magic = PERF_MAGIC,
1961 .size = sizeof(f_header),
1962 .attr_size = sizeof(f_attr),
1963 .attrs = {
1c0b04d1 1964 .offset = header->attr_offset,
a91e5431 1965 .size = evlist->nr_entries * sizeof(f_attr),
7c6a1c65
PZ
1966 },
1967 .data = {
1c0b04d1
ACM
1968 .offset = header->data_offset,
1969 .size = header->data_size,
7c6a1c65 1970 },
8755a8f2 1971 .event_types = {
1c0b04d1
ACM
1972 .offset = header->event_offset,
1973 .size = header->event_size,
8755a8f2 1974 },
7c6a1c65
PZ
1975 };
1976
1c0b04d1 1977 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2ba08250 1978
7c6a1c65 1979 lseek(fd, 0, SEEK_SET);
d5eed904
ACM
1980 err = do_write(fd, &f_header, sizeof(f_header));
1981 if (err < 0) {
1982 pr_debug("failed to write perf header\n");
1983 return err;
1984 }
1c0b04d1 1985 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
7c6a1c65 1986
1c0b04d1 1987 header->frozen = 1;
d5eed904 1988 return 0;
7c6a1c65
PZ
1989}
1990
1c0b04d1 1991static int perf_header__getbuffer64(struct perf_header *header,
ba21594c
ACM
1992 int fd, void *buf, size_t size)
1993{
1e7972cc 1994 if (readn(fd, buf, size) <= 0)
ba21594c
ACM
1995 return -1;
1996
1c0b04d1 1997 if (header->needs_swap)
ba21594c
ACM
1998 mem_bswap_64(buf, size);
1999
2000 return 0;
2001}
2002
1c0b04d1 2003int perf_header__process_sections(struct perf_header *header, int fd,
fbe96f29 2004 void *data,
1c0b04d1 2005 int (*process)(struct perf_file_section *section,
b1e5a9be
RR
2006 struct perf_header *ph,
2007 int feat, int fd, void *data))
2ba08250 2008{
b1e5a9be 2009 struct perf_file_section *feat_sec, *sec;
9e827dd0
FW
2010 int nr_sections;
2011 int sec_size;
b1e5a9be
RR
2012 int feat;
2013 int err;
9e827dd0 2014
1c0b04d1 2015 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
9e827dd0 2016 if (!nr_sections)
37562eac 2017 return 0;
9e827dd0 2018
b1e5a9be 2019 feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
9e827dd0 2020 if (!feat_sec)
37562eac 2021 return -1;
9e827dd0
FW
2022
2023 sec_size = sizeof(*feat_sec) * nr_sections;
2024
1c0b04d1 2025 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
9e827dd0 2026
b1e5a9be
RR
2027 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2028 if (err < 0)
769885f3 2029 goto out_free;
9e827dd0 2030
b1e5a9be
RR
2031 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2032 err = process(sec++, header, feat, fd, data);
2033 if (err < 0)
2034 goto out_free;
2ba08250 2035 }
b1e5a9be 2036 err = 0;
769885f3 2037out_free:
37562eac
ACM
2038 free(feat_sec);
2039 return err;
769885f3 2040}
4778d2e4 2041
114382a0
SE
2042static const int attr_file_abi_sizes[] = {
2043 [0] = PERF_ATTR_SIZE_VER0,
2044 [1] = PERF_ATTR_SIZE_VER1,
239cc478 2045 [2] = PERF_ATTR_SIZE_VER2,
0f6a3015 2046 [3] = PERF_ATTR_SIZE_VER3,
114382a0
SE
2047 0,
2048};
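/*
 * Worked example for the loop below (sizes are from perf_event.h at the
 * time of writing, so treat them as illustrative): PERF_ATTR_SIZE_VER0
 * is 64 bytes and struct perf_file_section is two u64s, so a native
 * ABI0 file has attr_size == 64 + 16 == 80.  A file written on an
 * opposite-endian host instead presents bswap_64(80) ==
 * 0x5000000000000000, which matches no ref_size directly but does after
 * the bswap_64() retry, at which point ph->needs_swap is set.
 */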
2049
2050/*
2051 * In the legacy file format, the magic number does not encode endianness;
2052 * hdr_sz was used for that instead. But given that hdr_sz can vary based
2053 * on the ABI revision, we need to try all combinations of size and byte
2054 * order to detect the endianness.
2055 */
2056static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
73323f54 2057{
114382a0
SE
2058 uint64_t ref_size, attr_size;
2059 int i;
73323f54 2060
114382a0
SE
2061 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2062 ref_size = attr_file_abi_sizes[i]
2063 + sizeof(struct perf_file_section);
2064 if (hdr_sz != ref_size) {
2065 attr_size = bswap_64(hdr_sz);
2066 if (attr_size != ref_size)
2067 continue;
73323f54 2068
114382a0
SE
2069 ph->needs_swap = true;
2070 }
2071 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2072 i,
2073 ph->needs_swap);
2074 return 0;
2075 }
2076 /* could not determine endianness */
2077 return -1;
2078}
73323f54 2079
114382a0
SE
2080#define PERF_PIPE_HDR_VER0 16
2081
2082static const size_t attr_pipe_abi_sizes[] = {
2083 [0] = PERF_PIPE_HDR_VER0,
2084 0,
2085};
2086
2087/*
2088 * In the legacy pipe format, there is an implicit assumption that the endianness
2089 * of the host recording the samples and the host parsing them is the
2090 * same. This is not always the case, given that the pipe output can be
2091 * redirected into a file and analyzed on a different machine with a possibly
2092 * different endianness and different perf_event ABI revisions in the perf tool itself.
2093 */
2094static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2095{
2096 u64 attr_size;
2097 int i;
2098
2099 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2100 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2101 attr_size = bswap_64(hdr_sz);
2102 if (attr_size != hdr_sz)
2103 continue;
73323f54
SE
2104
2105 ph->needs_swap = true;
2106 }
114382a0 2107 pr_debug("Pipe ABI%d perf.data file detected\n", i);
73323f54
SE
2108 return 0;
2109 }
114382a0
SE
2110 return -1;
2111}
2112
2113static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2114 bool is_pipe, struct perf_header *ph)
2115{
2116 int ret;
2117
2118 /* check for legacy format */
2119 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2120 if (ret == 0) {
2121 pr_debug("legacy perf.data format\n");
2122 if (is_pipe)
2123 return try_all_pipe_abis(hdr_sz, ph);
2124
2125 return try_all_file_abis(hdr_sz, ph);
2126 }
2127 /*
2128 * the new magic number serves two purposes:
2129 * - unique number to identify actual perf.data files
2130 * - encode endianness of file
2131 */
73323f54 2132
114382a0
SE
2133 /* check magic number with one endianness */
2134 if (magic == __perf_magic2)
73323f54
SE
2135 return 0;
2136
114382a0
SE
2137 /* check magic number with opposite endianness */
2138 if (magic != __perf_magic2_sw)
73323f54
SE
2139 return -1;
2140
2141 ph->needs_swap = true;
2142
2143 return 0;
2144}
2145
1c0b04d1 2146int perf_file_header__read(struct perf_file_header *header,
37562eac
ACM
2147 struct perf_header *ph, int fd)
2148{
73323f54
SE
2149 int ret;
2150
37562eac 2151 lseek(fd, 0, SEEK_SET);
37562eac 2152
73323f54
SE
2153 ret = readn(fd, header, sizeof(*header));
2154 if (ret <= 0)
37562eac
ACM
2155 return -1;
2156
114382a0
SE
2157 if (check_magic_endian(header->magic,
2158 header->attr_size, false, ph) < 0) {
2159 pr_debug("magic/endian check failed\n");
73323f54 2160 return -1;
114382a0 2161 }
ba21594c 2162
73323f54 2163 if (ph->needs_swap) {
1c0b04d1 2164 mem_bswap_64(header, offsetof(struct perf_file_header,
73323f54 2165 adds_features));
ba21594c
ACM
2166 }
2167
1c0b04d1 2168 if (header->size != sizeof(*header)) {
37562eac 2169 /* Support the previous format */
1c0b04d1
ACM
2170 if (header->size == offsetof(typeof(*header), adds_features))
2171 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
37562eac
ACM
2172 else
2173 return -1;
d327fa43 2174 } else if (ph->needs_swap) {
d327fa43
DA
2175 /*
2176 * feature bitmap is declared as an array of unsigned longs --
2177 * not good since its size can differ between the host that
2178 * generated the data file and the host analyzing the file.
2179 *
2180 * We need to handle endianness, but we don't know the size of
2181 * the unsigned long where the file was generated. Take a best
2182 * guess at determining it: try a 64-bit swap first (i.e., file
2183 * created on a 64-bit host), and check whether the hostname feature
2184 * bit is set (this feature bit is forced on as of fbe96f2).
2185 * If the bit is not set, undo the 64-bit swap and try a 32-bit
2186 * swap. If the hostname bit is still not set (e.g., older data
2187 * file), punt and fall back to the original behavior --
2188 * clearing all feature bits and setting buildid.
2189 */
80c0120a
DA
2190 mem_bswap_64(&header->adds_features,
2191 BITS_TO_U64(HEADER_FEAT_BITS));
d327fa43
DA
2192
2193 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
80c0120a
DA
2194 /* unswap as u64 */
2195 mem_bswap_64(&header->adds_features,
2196 BITS_TO_U64(HEADER_FEAT_BITS));
2197
2198 /* unswap as u32 */
2199 mem_bswap_32(&header->adds_features,
2200 BITS_TO_U32(HEADER_FEAT_BITS));
d327fa43
DA
2201 }
2202
2203 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2204 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2205 set_bit(HEADER_BUILD_ID, header->adds_features);
2206 }
4778d2e4 2207 }
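	/*
	 * Worked example of the guess above (illustrative only): a 32-bit
	 * big-endian producer setting a low feature bit, say bit 0, stores
	 * the bytes 00 00 00 01 00 00 00 00.  Read on a 64-bit little-endian
	 * host those bytes land on bit 24; a 64-bit swap moves them to
	 * bit 32, still wrong, so the hostname test fails.  Undoing that and
	 * swapping as 32-bit words gives 01 00 00 00 ..., i.e. bit 0 again,
	 * and the hostname bit can be found where it was written.
	 */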
9e827dd0 2208
1c0b04d1 2209 memcpy(&ph->adds_features, &header->adds_features,
ba21594c 2210 sizeof(ph->adds_features));
37562eac 2211
1c0b04d1
ACM
2212 ph->event_offset = header->event_types.offset;
2213 ph->event_size = header->event_types.size;
2214 ph->data_offset = header->data.offset;
2215 ph->data_size = header->data.size;
37562eac
ACM
2216 return 0;
2217}
2218
1c0b04d1 2219static int perf_file_section__process(struct perf_file_section *section,
ba21594c 2220 struct perf_header *ph,
da378962 2221 int feat, int fd, void *data)
37562eac 2222{
1c0b04d1 2223 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
9486aa38 2224 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1c0b04d1 2225 "%d, continuing...\n", section->offset, feat);
37562eac
ACM
2226 return 0;
2227 }
2228
b1e5a9be
RR
2229 if (feat >= HEADER_LAST_FEATURE) {
2230 pr_debug("unknown feature %d, continuing...\n", feat);
2231 return 0;
2232 }
2233
f1c67db7
RR
2234 if (!feat_ops[feat].process)
2235 return 0;
37562eac 2236
da378962 2237 return feat_ops[feat].process(section, ph, feat, fd, data);
37562eac 2238}
2ba08250 2239
1c0b04d1 2240static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
454c407e
TZ
2241 struct perf_header *ph, int fd,
2242 bool repipe)
7c6a1c65 2243{
73323f54
SE
2244 int ret;
2245
2246 ret = readn(fd, header, sizeof(*header));
2247 if (ret <= 0)
2248 return -1;
2249
114382a0
SE
2250 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2251 pr_debug("endian/magic failed\n");
8dc58101 2252 return -1;
114382a0
SE
2253 }
2254
2255 if (ph->needs_swap)
2256 header->size = bswap_64(header->size);
8dc58101 2257
1c0b04d1 2258 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
454c407e
TZ
2259 return -1;
2260
8dc58101
TZ
2261 return 0;
2262}
2263
2264static int perf_header__read_pipe(struct perf_session *session, int fd)
2265{
1c0b04d1 2266 struct perf_header *header = &session->header;
8dc58101
TZ
2267 struct perf_pipe_file_header f_header;
2268
1c0b04d1 2269 if (perf_file_header__read_pipe(&f_header, header, fd,
454c407e 2270 session->repipe) < 0) {
8dc58101
TZ
2271 pr_debug("incompatible file format\n");
2272 return -EINVAL;
2273 }
2274
2275 session->fd = fd;
2276
2277 return 0;
2278}
2279
69996df4
SE
2280static int read_attr(int fd, struct perf_header *ph,
2281 struct perf_file_attr *f_attr)
2282{
2283 struct perf_event_attr *attr = &f_attr->attr;
2284 size_t sz, left;
2285 size_t our_sz = sizeof(f_attr->attr);
2286 int ret;
2287
2288 memset(f_attr, 0, sizeof(*f_attr));
2289
2290 /* read minimal guaranteed structure */
2291 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2292 if (ret <= 0) {
2293 pr_debug("cannot read %d bytes of header attr\n",
2294 PERF_ATTR_SIZE_VER0);
2295 return -1;
2296 }
2297
2298 /* on file perf_event_attr size */
2299 sz = attr->size;
114382a0 2300
69996df4
SE
2301 if (ph->needs_swap)
2302 sz = bswap_32(sz);
2303
2304 if (sz == 0) {
2305 /* assume ABI0 */
2306 sz = PERF_ATTR_SIZE_VER0;
2307 } else if (sz > our_sz) {
2308 pr_debug("file uses a more recent and unsupported ABI"
2309 " (%zu bytes extra)\n", sz - our_sz);
2310 return -1;
2311 }
2312 /* what we have not yet read and that we know about */
2313 left = sz - PERF_ATTR_SIZE_VER0;
2314 if (left) {
2315 void *ptr = attr;
2316 ptr += PERF_ATTR_SIZE_VER0;
2317
2318 ret = readn(fd, ptr, left);
2319 }
2320 /* read perf_file_section, ids are read in caller */
2321 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2322
2323 return ret <= 0 ? -1 : 0;
2324}
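
/*
 * For illustration (sizes are this era's values and may differ): with
 * PERF_ATTR_SIZE_VER0 == 64, an old file recording only the VER0 fields
 * stores attr->size == 64 (or 0) and the tail of our perf_event_attr is
 * simply left zeroed; a file with, say, a 72-byte attr makes us read the
 * extra 8 bytes into the same struct; anything larger than sizeof(*attr)
 * on this build is a newer, unsupported ABI and is rejected above.
 */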
2325
831394bd
NK
2326static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2327 struct pevent *pevent)
cb9dd49e 2328{
831394bd 2329 struct event_format *event;
cb9dd49e
ACM
2330 char bf[128];
2331
831394bd
NK
2332 /* already prepared */
2333 if (evsel->tp_format)
2334 return 0;
2335
2336 event = pevent_find_event(pevent, evsel->attr.config);
cb9dd49e
ACM
2337 if (event == NULL)
2338 return -1;
2339
831394bd
NK
2340 if (!evsel->name) {
2341 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2342 evsel->name = strdup(bf);
2343 if (evsel->name == NULL)
2344 return -1;
2345 }
cb9dd49e 2346
fcf65bf1 2347 evsel->tp_format = event;
cb9dd49e
ACM
2348 return 0;
2349}
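
/*
 * For tracepoint events, attr.config carries the numeric tracefs event id
 * that was recorded; pevent_find_event() maps it back to the parsed
 * event_format, so an unnamed evsel ends up with a "system:name" style
 * name (e.g. "sched:sched_switch" -- example name only).
 */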
2350
831394bd
NK
2351static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2352 struct pevent *pevent)
cb9dd49e
ACM
2353{
2354 struct perf_evsel *pos;
2355
2356 list_for_each_entry(pos, &evlist->entries, node) {
831394bd
NK
2357 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2358 perf_evsel__prepare_tracepoint_event(pos, pevent))
cb9dd49e
ACM
2359 return -1;
2360 }
2361
2362 return 0;
2363}
2364
a91e5431 2365int perf_session__read_header(struct perf_session *session, int fd)
8dc58101 2366{
1c0b04d1 2367 struct perf_header *header = &session->header;
ba21594c 2368 struct perf_file_header f_header;
7c6a1c65
PZ
2369 struct perf_file_attr f_attr;
2370 u64 f_id;
7c6a1c65
PZ
2371 int nr_attrs, nr_ids, i, j;
2372
a91e5431
ACM
2373 session->evlist = perf_evlist__new(NULL, NULL);
2374 if (session->evlist == NULL)
2375 return -ENOMEM;
2376
8dc58101
TZ
2377 if (session->fd_pipe)
2378 return perf_header__read_pipe(session, fd);
2379
69996df4 2380 if (perf_file_header__read(&f_header, header, fd) < 0)
4dc0a04b 2381 return -EINVAL;
7c6a1c65 2382
69996df4 2383 nr_attrs = f_header.attrs.size / f_header.attr_size;
7c6a1c65
PZ
2384 lseek(fd, f_header.attrs.offset, SEEK_SET);
2385
2386 for (i = 0; i < nr_attrs; i++) {
a91e5431 2387 struct perf_evsel *evsel;
1c222bce 2388 off_t tmp;
7c6a1c65 2389
69996df4 2390 if (read_attr(fd, header, &f_attr) < 0)
769885f3 2391 goto out_errno;
ba21594c 2392
eda3913b
DA
2393 if (header->needs_swap)
2394 perf_event__attr_swap(&f_attr.attr);
2395
1c222bce 2396 tmp = lseek(fd, 0, SEEK_CUR);
a91e5431 2397 evsel = perf_evsel__new(&f_attr.attr, i);
7c6a1c65 2398
a91e5431
ACM
2399 if (evsel == NULL)
2400 goto out_delete_evlist;
2401 /*
2402 * Do it before so that if perf_evsel__alloc_id fails, this
2403 * entry gets purged too at perf_evlist__delete().
2404 */
2405 perf_evlist__add(session->evlist, evsel);
7c6a1c65
PZ
2406
2407 nr_ids = f_attr.ids.size / sizeof(u64);
a91e5431
ACM
2408 /*
2409 * We don't have the cpu and thread maps on the header, so
2410 * for allocating the perf_sample_id table we fake 1 cpu and
2411 * hattr->ids threads.
2412 */
2413 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2414 goto out_delete_evlist;
2415
7c6a1c65
PZ
2416 lseek(fd, f_attr.ids.offset, SEEK_SET);
2417
2418 for (j = 0; j < nr_ids; j++) {
1c0b04d1 2419 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
769885f3 2420 goto out_errno;
7c6a1c65 2421
a91e5431 2422 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
7c6a1c65 2423 }
11deb1f9 2424
7c6a1c65
PZ
2425 lseek(fd, tmp, SEEK_SET);
2426 }
2427
d04b35f8
ACM
2428 symbol_conf.nr_events = nr_attrs;
2429
8755a8f2
AV
2430 if (f_header.event_types.size) {
2431 lseek(fd, f_header.event_types.offset, SEEK_SET);
db146f06
RR
2432 trace_events = malloc(f_header.event_types.size);
2433 if (trace_events == NULL)
4dc0a04b 2434 return -ENOMEM;
db146f06 2435 if (perf_header__getbuffer64(header, fd, trace_events,
ba21594c 2436 f_header.event_types.size))
769885f3 2437 goto out_errno;
db146f06 2438 trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
8755a8f2 2439 }
03456a15 2440
da378962 2441 perf_header__process_sections(header, fd, &session->pevent,
fbe96f29 2442 perf_file_section__process);
4778d2e4 2443
1c0b04d1 2444 lseek(fd, header->data_offset, SEEK_SET);
7c6a1c65 2445
831394bd
NK
2446 if (perf_evlist__prepare_tracepoint_events(session->evlist,
2447 session->pevent))
cb9dd49e
ACM
2448 goto out_delete_evlist;
2449
1c0b04d1 2450 header->frozen = 1;
4dc0a04b 2451 return 0;
769885f3
ACM
2452out_errno:
2453 return -errno;
a91e5431
ACM
2454
2455out_delete_evlist:
2456 perf_evlist__delete(session->evlist);
2457 session->evlist = NULL;
2458 return -ENOMEM;
7c6a1c65 2459}
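
/*
 * Rough sketch of the on-disk layout walked above (illustrative summary of
 * the reads in this function, not an authoritative format description):
 *
 *   +----------------------------+ offset 0
 *   | struct perf_file_header    | magic, attr_size, section pointers
 *   +----------------------------+
 *   | perf_file_attr[nr_attrs]   | at f_header.attrs.offset, each entry
 *   |                            | pointing at its array of u64 ids
 *   +----------------------------+
 *   | perf_trace_event_type[]    | at f_header.event_types.offset (optional)
 *   +----------------------------+
 *   | feature sections           | walked by perf_header__process_sections()
 *   +----------------------------+
 *   | sample data                | at header->data_offset
 *   +----------------------------+
 */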
0d3a5c88 2460
45694aa7 2461int perf_event__synthesize_attr(struct perf_tool *tool,
f4d83436 2462 struct perf_event_attr *attr, u32 ids, u64 *id,
743eb868 2463 perf_event__handler_t process)
2c46dbb5 2464{
8115d60c 2465 union perf_event *ev;
2c46dbb5
TZ
2466 size_t size;
2467 int err;
2468
2469 size = sizeof(struct perf_event_attr);
9ac3e487 2470 size = PERF_ALIGN(size, sizeof(u64));
2c46dbb5
TZ
2471 size += sizeof(struct perf_event_header);
2472 size += ids * sizeof(u64);
2473
2474 ev = malloc(size);
2475
ce47dc56
CS
2476 if (ev == NULL)
2477 return -ENOMEM;
2478
2c46dbb5
TZ
2479 ev->attr.attr = *attr;
2480 memcpy(ev->attr.id, id, ids * sizeof(u64));
2481
2482 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
f4d83436 2483 ev->attr.header.size = (u16)size;
2c46dbb5 2484
f4d83436
RR
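	/*
	 * header.size is only a u16: if the cast above truncated the value,
	 * the comparison below fails and we return -E2BIG rather than
	 * emitting a corrupt record.
	 */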
2485 if (ev->attr.header.size == size)
2486 err = process(tool, ev, NULL, NULL);
2487 else
2488 err = -E2BIG;
2c46dbb5
TZ
2489
2490 free(ev);
2491
2492 return err;
2493}
2494
45694aa7 2495int perf_event__synthesize_attrs(struct perf_tool *tool,
d20deb64 2496 struct perf_session *session,
a91e5431 2497 perf_event__handler_t process)
2c46dbb5 2498{
6606f873 2499 struct perf_evsel *evsel;
a91e5431 2500 int err = 0;
2c46dbb5 2501
6606f873
RR
2502 list_for_each_entry(evsel, &session->evlist->entries, node) {
2503 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
2504 evsel->id, process);
2c46dbb5
TZ
2505 if (err) {
2506 pr_debug("failed to create perf header attribute\n");
2507 return err;
2508 }
2509 }
2510
2511 return err;
2512}
2513
8115d60c 2514int perf_event__process_attr(union perf_event *event,
10d0f086 2515 struct perf_evlist **pevlist)
2c46dbb5 2516{
f4d83436 2517 u32 i, ids, n_ids;
a91e5431 2518 struct perf_evsel *evsel;
10d0f086 2519 struct perf_evlist *evlist = *pevlist;
2c46dbb5 2520
10d0f086
ACM
2521 if (evlist == NULL) {
2522 *pevlist = evlist = perf_evlist__new(NULL, NULL);
2523 if (evlist == NULL)
a91e5431
ACM
2524 return -ENOMEM;
2525 }
2526
10d0f086 2527 evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
a91e5431 2528 if (evsel == NULL)
2c46dbb5
TZ
2529 return -ENOMEM;
2530
10d0f086 2531 perf_evlist__add(evlist, evsel);
a91e5431 2532
8115d60c
ACM
2533 ids = event->header.size;
2534 ids -= (void *)&event->attr.id - (void *)event;
2c46dbb5 2535 n_ids = ids / sizeof(u64);
a91e5431
ACM
2536 /*
2537 * We don't have the cpu and thread maps on the header, so
2538 * for allocating the perf_sample_id table we fake 1 cpu and
2539 * hattr->ids threads.
2540 */
2541 if (perf_evsel__alloc_id(evsel, 1, n_ids))
2542 return -ENOMEM;
2c46dbb5
TZ
2543
2544 for (i = 0; i < n_ids; i++) {
10d0f086 2545 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2c46dbb5
TZ
2546 }
2547
2c46dbb5
TZ
2548 return 0;
2549}
cd19a035 2550
45694aa7 2551int perf_event__synthesize_event_type(struct perf_tool *tool,
d20deb64 2552 u64 event_id, char *name,
8115d60c 2553 perf_event__handler_t process,
743eb868 2554 struct machine *machine)
cd19a035 2555{
8115d60c 2556 union perf_event ev;
cd19a035
TZ
2557 size_t size = 0;
2558 int err = 0;
2559
2560 memset(&ev, 0, sizeof(ev));
2561
2562 ev.event_type.event_type.event_id = event_id;
2563 memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
2564 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
2565
2566 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
f8f4b287 2567 size = strlen(ev.event_type.event_type.name);
9ac3e487 2568 size = PERF_ALIGN(size, sizeof(u64));
cd19a035
TZ
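	/*
	 * Only the 8-byte-aligned portion of the name that is actually used
	 * is sent: the unused tail of the fixed MAX_EVENT_NAME buffer is
	 * trimmed from header.size below.
	 */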
2569 ev.event_type.header.size = sizeof(ev.event_type) -
2570 (sizeof(ev.event_type.event_type.name) - size);
2571
45694aa7 2572 err = process(tool, &ev, NULL, machine);
cd19a035
TZ
2573
2574 return err;
2575}
2576
45694aa7 2577int perf_event__synthesize_event_types(struct perf_tool *tool,
d20deb64 2578 perf_event__handler_t process,
743eb868 2579 struct machine *machine)
cd19a035
TZ
2580{
2581 struct perf_trace_event_type *type;
2582 int i, err = 0;
2583
db146f06
RR
2584 for (i = 0; i < trace_event_count; i++) {
2585 type = &trace_events[i];
cd19a035 2586
45694aa7 2587 err = perf_event__synthesize_event_type(tool, type->event_id,
8115d60c 2588 type->name, process,
743eb868 2589 machine);
cd19a035
TZ
2590 if (err) {
2591 pr_debug("failed to create perf header event type\n");
2592 return err;
2593 }
2594 }
2595
2596 return err;
2597}
2598
45694aa7 2599int perf_event__process_event_type(struct perf_tool *tool __unused,
743eb868 2600 union perf_event *event)
cd19a035 2601{
8115d60c
ACM
2602 if (perf_header__push_event(event->event_type.event_type.event_id,
2603 event->event_type.event_type.name) < 0)
cd19a035
TZ
2604 return -ENOMEM;
2605
2606 return 0;
2607}
9215545e 2608
45694aa7 2609int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
d20deb64 2610 struct perf_evlist *evlist,
743eb868 2611 perf_event__handler_t process)
9215545e 2612{
8115d60c 2613 union perf_event ev;
29208e57 2614 struct tracing_data *tdata;
9215545e 2615 ssize_t size = 0, aligned_size = 0, padding;
fb7d0b3c 2616 int err __used = 0;
9215545e 2617
29208e57
JO
2618 /*
2619 * We are going to store the size of the data followed
2620 * by the data contents. Since fd refers to a pipe,
2621 * we cannot seek back to store the size of the data once
2622 * we know it. Instead we:
2623 *
2624 * - write the tracing data to the temp file
2625 * - get/write the data size to pipe
2626 * - write the tracing data from the temp file
2627 * to the pipe
2628 */
2629 tdata = tracing_data_get(&evlist->entries, fd, true);
2630 if (!tdata)
2631 return -1;
2632
9215545e
TZ
2633 memset(&ev, 0, sizeof(ev));
2634
2635 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
29208e57 2636 size = tdata->size;
9ac3e487 2637 aligned_size = PERF_ALIGN(size, sizeof(u64));
9215545e
TZ
2638 padding = aligned_size - size;
2639 ev.tracing_data.header.size = sizeof(ev.tracing_data);
2640 ev.tracing_data.size = aligned_size;
2641
45694aa7 2642 process(tool, &ev, NULL, NULL);
9215545e 2643
29208e57
JO
2644 /*
2645 * The put function will copy all the tracing data
2646 * stored in temp file to the pipe.
2647 */
2648 tracing_data_put(tdata);
2649
9215545e
TZ
2650 write_padded(fd, NULL, 0, padding);
2651
2652 return aligned_size;
2653}
2654
8115d60c
ACM
2655int perf_event__process_tracing_data(union perf_event *event,
2656 struct perf_session *session)
9215545e 2657{
8115d60c 2658 ssize_t size_read, padding, size = event->tracing_data.size;
9215545e
TZ
2659 off_t offset = lseek(session->fd, 0, SEEK_CUR);
2660 char buf[BUFSIZ];
2661
2662 /* setup for reading amidst mmap */
2663 lseek(session->fd, offset + sizeof(struct tracing_data_event),
2664 SEEK_SET);
2665
da378962
ACM
2666 size_read = trace_report(session->fd, &session->pevent,
2667 session->repipe);
9ac3e487 2668 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
9215545e
TZ
2669
2670 if (read(session->fd, buf, padding) < 0)
2671 die("reading input file");
454c407e
TZ
2672 if (session->repipe) {
2673 int retw = write(STDOUT_FILENO, buf, padding);
2674 if (retw <= 0 || retw != padding)
2675 die("repiping tracing data padding");
2676 }
9215545e
TZ
2677
2678 if (size_read + padding != size)
2679 die("tracing data size mismatch");
2680
831394bd
NK
2681 perf_evlist__prepare_tracepoint_events(session->evlist,
2682 session->pevent);
8b6ee4c5 2683
9215545e
TZ
2684 return size_read + padding;
2685}
c7929e47 2686
45694aa7 2687int perf_event__synthesize_build_id(struct perf_tool *tool,
d20deb64 2688 struct dso *pos, u16 misc,
8115d60c 2689 perf_event__handler_t process,
743eb868 2690 struct machine *machine)
c7929e47 2691{
8115d60c 2692 union perf_event ev;
c7929e47
TZ
2693 size_t len;
2694 int err = 0;
2695
2696 if (!pos->hit)
2697 return err;
2698
2699 memset(&ev, 0, sizeof(ev));
2700
2701 len = pos->long_name_len + 1;
9ac3e487 2702 len = PERF_ALIGN(len, NAME_ALIGN);
c7929e47
TZ
2703 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
2704 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2705 ev.build_id.header.misc = misc;
23346f21 2706 ev.build_id.pid = machine->pid;
c7929e47
TZ
2707 ev.build_id.header.size = sizeof(ev.build_id) + len;
2708 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2709
45694aa7 2710 err = process(tool, &ev, NULL, machine);
c7929e47
TZ
2711
2712 return err;
2713}
2714
45694aa7 2715int perf_event__process_build_id(struct perf_tool *tool __used,
d20deb64 2716 union perf_event *event,
8115d60c 2717 struct perf_session *session)
c7929e47 2718{
8115d60c
ACM
2719 __event_process_build_id(&event->build_id,
2720 event->build_id.filename,
a1645ce1 2721 session);
c7929e47
TZ
2722 return 0;
2723}
a1ac1d3c
SE
2724
2725void disable_buildid_cache(void)
2726{
2727 no_buildid_cache = true;
2728}