4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
12 #include "util/util.h"
13 #include "util/parse-options.h"
14 #include "util/parse-events.h"
15 #include "util/string.h"
17 #include "util/header.h"
18 #include "util/event.h"
19 #include "util/debug.h"
/*
 * Round x up to the next multiple of a; a must be a power of two
 * (the mask trick (x + a-1) & ~(a-1) only works then).
 */
24 #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
25 #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
/* Per-CPU, per-counter perf fds returned by sys_perf_counter_open(). */
27 static int fd
[MAX_NR_CPUS
][MAX_COUNTERS
];
/* Default sample period when the user gives neither -c nor -F. */
29 static long default_interval
= 100000;
31 static int nr_cpus
= 0;
/* Runtime page size (filled from sysconf in __cmd_record). */
32 static unsigned int page_size
;
/* Number of data pages per mmap ring buffer (-m option). */
33 static unsigned int mmap_pages
= 128;
36 static const char *output_name
= "perf.data";
/* Command-line toggles; each backs one entry in options[] below. */
38 static unsigned int realtime_prio
= 0;
39 static int raw_samples
= 0;
40 static int system_wide
= 0;
41 static int profile_cpu
= -1;
42 static pid_t target_pid
= -1;
43 static int inherit
= 1;
45 static int append_file
= 0;
46 static int call_graph
= 0;
47 static int inherit_stat
= 0;
48 static int no_samples
= 0;
49 static int sample_address
= 0;
/* Timestamps used by mmap_read() to report how far behind we fell. */
52 static struct timeval last_read
;
53 static struct timeval this_read
;
/* Running total of bytes written to the output file (see write_output). */
55 static u64 bytes_written
;
/* pollfd set covering every open counter fd, polled in __cmd_record. */
57 static struct pollfd event_array
[MAX_NR_CPUS
* MAX_COUNTERS
];
62 static int file_new
= 1;
/* perf.data header; appended to on exit via atexit_header(). */
64 struct perf_header
*header
;
/* One mmap ring buffer per (cpu, counter), set up in create_counter(). */
73 static struct mmap_data mmap_array
[MAX_NR_CPUS
][MAX_COUNTERS
];
/*
 * Read the kernel's write cursor (data_head) from the mmap control page.
 * NOTE(review): this excerpt elides lines (original numbering jumps past
 * 77) — the load of pc->data_head, the read barrier and the return are
 * not visible here.
 */
75 static unsigned long mmap_read_head(struct mmap_data
*md
)
77 struct perf_counter_mmap_page
*pc
= md
->base
;
/*
 * Publish our read cursor (data_tail) back to the kernel so it knows how
 * much ring-buffer space can be reused.
 * NOTE(review): lines elided in this excerpt — the barrier and the store
 * to pc->data_tail are not visible.
 */
86 static void mmap_write_tail(struct mmap_data
*md
, unsigned long tail
)
88 struct perf_counter_mmap_page
*pc
= md
->base
;
91 * ensure all reads are done before we write the tail out.
/*
 * Append a buffer to the global output fd, dying on write failure, and
 * account the bytes in bytes_written (used for the final summary and the
 * perf.data header's data_size).
 * NOTE(review): elided lines presumably loop until all of 'size' is
 * written — TODO confirm against the full source.
 */
97 static void write_output(void *buf
, size_t size
)
100 int ret
= write(output
, buf
, size
);
103 die("failed to write");
108 bytes_written
+= ret
;
/*
 * Drain one per-(cpu,counter) mmap ring buffer: copy everything between
 * our last position (md->prev) and the kernel's data_head out to the
 * perf.data file, handling ring wrap-around, then advance data_tail.
 * NOTE(review): several lines are elided in this excerpt (declarations of
 * iv/msecs/buf/size, the overrun check, the wrap handling around line
 * 160) — the commentary below describes only what is visible.
 */
112 static void mmap_read(struct mmap_data
*md
)
114 unsigned int head
= mmap_read_head(md
);
115 unsigned int old
= md
->prev
;
/* Sample data starts one page after the control page. */
116 unsigned char *data
= md
->base
+ page_size
;
121 gettimeofday(&this_read
, NULL
);
124 * If we're further behind than half the buffer, there's a chance
125 * the writer will bite our tail and mess up the samples under us.
127 * If we somehow ended up ahead of the head, we got messed up.
129 * In either case, truncate and restart at head.
136 timersub(&this_read
, &last_read
, &iv
);
137 msecs
= iv
.tv_sec
*1000 + iv
.tv_usec
/1000;
139 fprintf(stderr
, "WARNING: failed to keep up with mmap data."
140 " Last read %lu msecs ago.\n", msecs
);
143 * head points to a known good entry, start there.
148 last_read
= this_read
;
/* Wrap case: pending data straddles the end of the ring buffer. */
155 if ((old
& md
->mask
) + size
!= (head
& md
->mask
)) {
156 buf
= &data
[old
& md
->mask
];
/* First chunk: from 'old' up to the physical end of the buffer. */
157 size
= md
->mask
+ 1 - (old
& md
->mask
);
160 write_output(buf
, size
);
/* Second chunk: from the buffer start up to 'head'. */
163 buf
= &data
[old
& md
->mask
];
167 write_output(buf
, size
);
/* Tell the kernel we consumed up to 'old' so it may overwrite. */
170 mmap_write_tail(md
, old
);
/* Set from the signal handler, polled by the main record loop. */
173 static volatile int done
= 0;
/* Which signal stopped us; re-raised with default disposition at exit. */
174 static volatile int signr
= -1;
176 static void sig_handler(int sig
)
/*
 * atexit hook: re-deliver the terminating signal with its default handler
 * so our own exit status reflects it.
 * NOTE(review): the bodies of sig_handler and the signr guard in
 * sig_atexit are elided in this excerpt.
 */
182 static void sig_atexit(void)
187 signal(signr
, SIG_DFL
);
188 kill(getpid(), signr
);
/*
 * Synthesize a PERF_EVENT_COMM record for an already-running pid by
 * parsing /proc/<pid>/status (Name: and Tgid: lines), write it to the
 * output file, and — when 'full' — repeat for every thread listed in
 * /proc/<pid>/task. Returns the tgid.
 * NOTE(review): many lines are elided in this excerpt (error returns,
 * loop bodies, the per-thread event fill-in around line 249-255) — the
 * comments below cover only what is visible.
 */
191 static pid_t
pid_synthesize_comm_event(pid_t pid
, int full
)
193 struct comm_event comm_ev
;
194 char filename
[PATH_MAX
];
199 struct dirent dirent
, *next
;
202 snprintf(filename
, sizeof(filename
), "/proc/%d/status", pid
);
204 fp
= fopen(filename
, "r");
207 * We raced with a task exiting - just return:
210 fprintf(stderr
, "couldn't open %s\n", filename
);
214 memset(&comm_ev
, 0, sizeof(comm_ev
));
/* Keep scanning until both the comm name and the tgid are found. */
215 while (!comm_ev
.comm
[0] || !comm_ev
.pid
) {
216 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
219 if (memcmp(bf
, "Name:", 5) == 0) {
/* Skip leading whitespace after "Name:". */
221 while (*name
&& isspace(*name
))
/* Drop the trailing newline; size++ keeps room for the NUL. */
223 size
= strlen(name
) - 1;
224 memcpy(comm_ev
.comm
, name
, size
++);
225 } else if (memcmp(bf
, "Tgid:", 5) == 0) {
226 char *tgids
= bf
+ 5;
227 while (*tgids
&& isspace(*tgids
))
229 tgid
= comm_ev
.pid
= atoi(tgids
);
233 comm_ev
.header
.type
= PERF_EVENT_COMM
;
/* Event size is u64-aligned; shrink the record to the used comm bytes. */
234 size
= ALIGN(size
, sizeof(u64
));
235 comm_ev
.header
.size
= sizeof(comm_ev
) - (sizeof(comm_ev
.comm
) - size
);
240 write_output(&comm_ev
, comm_ev
.header
.size
);
/* full mode: emit one COMM event per thread under /proc/<pid>/task. */
244 snprintf(filename
, sizeof(filename
), "/proc/%d/task", pid
);
246 tasks
= opendir(filename
);
247 while (!readdir_r(tasks
, &dirent
, &next
) && next
) {
249 pid
= strtol(dirent
.d_name
, &end
, 10);
255 write_output(&comm_ev
, comm_ev
.header
.size
);
/* Reached only when /proc parsing failed (error path; body elided). */
264 fprintf(stderr
, "couldn't get COMM and pgid, malformed %s\n",
/*
 * Synthesize PERF_EVENT_MMAP records for every executable mapping of an
 * already-running pid by parsing /proc/<pid>/maps, so later report runs
 * can resolve sample IPs to files. Anonymous executable maps other than
 * [vdso] are skipped (no backing file to symbolize against).
 * NOTE(review): lines are elided in this excerpt (fp NULL check, pbf
 * advancing past '-' and the pgoff field, mmap_ev.pid/tid assignment) —
 * comments below cover only visible code.
 */
269 static void pid_synthesize_mmap_samples(pid_t pid
, pid_t tgid
)
271 char filename
[PATH_MAX
];
274 snprintf(filename
, sizeof(filename
), "/proc/%d/maps", pid
);
276 fp
= fopen(filename
, "r");
279 * We raced with a task exiting - just return:
282 fprintf(stderr
, "couldn't open %s\n", filename
);
286 char bf
[BUFSIZ
], *pbf
= bf
;
287 struct mmap_event mmap_ev
= {
288 .header
= { .type
= PERF_EVENT_MMAP
},
292 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
295 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
296 n
= hex2u64(pbf
, &mmap_ev
.start
);
300 n
= hex2u64(pbf
, &mmap_ev
.len
);
/* Only executable mappings are interesting for symbol resolution. */
304 if (*pbf
== 'x') { /* vm_exec */
305 char *execname
= strchr(bf
, '/');
308 if (execname
== NULL
)
309 execname
= strstr(bf
, "[vdso]");
311 if (execname
== NULL
)
314 size
= strlen(execname
);
315 execname
[size
- 1] = '\0'; /* Remove \n */
316 memcpy(mmap_ev
.filename
, execname
, size
);
/* u64-align and shrink the record to the used filename bytes. */
317 size
= ALIGN(size
, sizeof(u64
));
/* maps gives [start,end); convert end to a length. */
318 mmap_ev
.len
-= mmap_ev
.start
;
319 mmap_ev
.header
.size
= (sizeof(mmap_ev
) -
320 (sizeof(mmap_ev
.filename
) - size
));
324 write_output(&mmap_ev
, mmap_ev
.header
.size
);
/*
 * System-wide startup: walk /proc and synthesize COMM + MMAP events for
 * every pre-existing task, so samples from them can be attributed.
 * NOTE(review): opendir error handling and closedir are elided in this
 * excerpt.
 */
331 static void synthesize_all(void)
334 struct dirent dirent
, *next
;
336 proc
= opendir("/proc");
338 while (!readdir_r(proc
, &dirent
, &next
) && next
) {
342 pid
= strtol(dirent
.d_name
, &end
, 10);
343 if (*end
) /* only interested in proper numerical dirents */
/* full=1: also emit per-thread COMM events. */
346 tgid
= pid_synthesize_comm_event(pid
, 1);
347 pid_synthesize_mmap_samples(pid
, tgid
);
/*
 * Look up (append mode) or create-and-register the perf.data header attr
 * entry for counter index 'nr'. Existing entries are reused so an
 * appended session can be checked for attr compatibility; new ones are
 * added to the global header.
 * NOTE(review): the return statement is elided in this excerpt.
 */
355 static struct perf_header_attr
*get_header_attr(struct perf_counter_attr
*a
, int nr
)
357 struct perf_header_attr
*h_attr
;
359 if (nr
< header
->attrs
) {
360 h_attr
= header
->attr
[nr
];
362 h_attr
= perf_header_attr__new(a
);
363 perf_header__add_attr(header
, h_attr
);
/*
 * Configure and open one counter for (counter, cpu, pid): fill in the
 * perf_counter_attr from the command-line switches, call
 * sys_perf_counter_open() (falling back from HW cycles to the SW
 * cpu-clock counter when no PMU is available), register it in the
 * perf.data header, wire it into the poll set, mmap its ring buffer and
 * finally enable it.
 * NOTE(review): many lines are elided in this excerpt (freq/track
 * conditionals, the err = errno capture, retry after fallback, exit
 * paths) — comments below describe only visible code.
 */
369 static void create_counter(int counter
, int cpu
, pid_t pid
)
371 struct perf_counter_attr
*attr
= attrs
+ counter
;
372 struct perf_header_attr
*h_attr
;
373 int track
= !counter
; /* only the first counter needs these */
381 attr
->read_format
= PERF_FORMAT_TOTAL_TIME_ENABLED
|
382 PERF_FORMAT_TOTAL_TIME_RUNNING
|
385 attr
->sample_type
|= PERF_SAMPLE_IP
| PERF_SAMPLE_TID
;
388 attr
->sample_type
|= PERF_SAMPLE_PERIOD
;
390 attr
->sample_freq
= freq
;
394 attr
->sample_freq
= 0;
397 attr
->inherit_stat
= 1;
400 attr
->sample_type
|= PERF_SAMPLE_ADDR
;
403 attr
->sample_type
|= PERF_SAMPLE_CALLCHAIN
;
406 attr
->sample_type
|= PERF_SAMPLE_RAW
;
/* Inheritance only makes sense for per-task (cpu < 0) counters. */
410 attr
->inherit
= (cpu
< 0) && inherit
;
414 fd
[nr_cpu
][counter
] = sys_perf_counter_open(attr
, pid
, cpu
, group_fd
, 0);
416 if (fd
[nr_cpu
][counter
] < 0) {
420 die("Permission error - are you root?\n");
421 else if (err
== ENODEV
&& profile_cpu
!= -1)
422 die("No such device - did you specify an out-of-range profile CPU?\n");
425 * If it's cycles then fall back to hrtimer
426 * based cpu-clock-tick sw counter, which
427 * is always available even if no PMU support:
429 if (attr
->type
== PERF_TYPE_HARDWARE
430 && attr
->config
== PERF_COUNT_HW_CPU_CYCLES
) {
433 warning(" ... trying to fall back to cpu-clock-ticks\n");
434 attr
->type
= PERF_TYPE_SOFTWARE
;
435 attr
->config
= PERF_COUNT_SW_CPU_CLOCK
;
/* Fallback failed too: report the raw syscall error and give up. */
439 error("perfcounter syscall returned with %d (%s)\n",
440 fd
[nr_cpu
][counter
], strerror(err
));
441 die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
445 h_attr
= get_header_attr(attr
, counter
);
/* Append mode: the new attrs must match the file's existing ones. */
448 if (memcmp(&h_attr
->attr
, attr
, sizeof(*attr
))) {
449 fprintf(stderr
, "incompatible append\n");
454 if (read(fd
[nr_cpu
][counter
], &read_data
, sizeof(read_data
)) == -1) {
455 perror("Unable to read perf file descriptor\n");
/* Record this counter's kernel-assigned id in the header. */
459 perf_header_attr__add_id(h_attr
, read_data
.id
);
461 assert(fd
[nr_cpu
][counter
] >= 0);
462 fcntl(fd
[nr_cpu
][counter
], F_SETFL
, O_NONBLOCK
);
465 * First counter acts as the group leader:
467 if (group
&& group_fd
== -1)
468 group_fd
= fd
[nr_cpu
][counter
];
470 event_array
[nr_poll
].fd
= fd
[nr_cpu
][counter
];
471 event_array
[nr_poll
].events
= POLLIN
;
474 mmap_array
[nr_cpu
][counter
].counter
= counter
;
475 mmap_array
[nr_cpu
][counter
].prev
= 0;
/* Ring data area is mmap_pages pages; mask assumes a power of two. */
476 mmap_array
[nr_cpu
][counter
].mask
= mmap_pages
*page_size
- 1;
/* +1 page for the control (header) page in front of the data. */
477 mmap_array
[nr_cpu
][counter
].base
= mmap(NULL
, (mmap_pages
+1)*page_size
,
478 PROT_READ
|PROT_WRITE
, MAP_SHARED
, fd
[nr_cpu
][counter
], 0);
479 if (mmap_array
[nr_cpu
][counter
].base
== MAP_FAILED
) {
480 error("failed to mmap with %d (%s)\n", errno
, strerror(errno
));
484 ioctl(fd
[nr_cpu
][counter
], PERF_COUNTER_IOC_ENABLE
);
/*
 * Open every requested counter for one (cpu, pid) pair.
 * NOTE(review): elided lines presumably reset group_fd and bump nr_cpu —
 * TODO confirm against the full source.
 */
487 static void open_counters(int cpu
, pid_t pid
)
492 for (counter
= 0; counter
< nr_counters
; counter
++)
493 create_counter(counter
, cpu
, pid
);
/*
 * atexit hook: fold the sample bytes we streamed out into the header's
 * data_size and rewrite the perf.data header so the file is self-
 * describing even on abnormal exit.
 */
498 static void atexit_header(void)
500 header
->data_size
+= bytes_written
;
502 perf_header__write(header
, output
);
/*
 * Main record driver: set up signals and the output file (new or append),
 * open counters (per-cpu for system-wide / -C, otherwise per-task),
 * synthesize startup events, optionally fork+exec the workload and raise
 * our own scheduling priority, then loop draining the mmap buffers and
 * poll()ing until done. Prints a size/sample summary at the end.
 * NOTE(review): large parts are elided in this excerpt — the fork/exec
 * child path, the main loop's hits/samples bookkeeping, and the final
 * summary arguments are only partially visible.
 */
505 static int __cmd_record(int argc
, const char **argv
)
513 page_size
= sysconf(_SC_PAGE_SIZE
);
514 nr_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
515 assert(nr_cpus
<= MAX_NR_CPUS
);
516 assert(nr_cpus
>= 0);
519 signal(SIGCHLD
, sig_handler
);
520 signal(SIGINT
, sig_handler
);
/* Refuse to clobber a non-empty perf.data unless -f or -A was given. */
522 if (!stat(output_name
, &st
) && st
.st_size
) {
523 if (!force
&& !append_file
) {
524 fprintf(stderr
, "Error, output file %s exists, use -A to append or -f to overwrite.\n",
532 flags
= O_CREAT
|O_RDWR
;
538 output
= open(output_name
, flags
, S_IRUSR
|S_IWUSR
);
540 perror("failed to create output file");
/* Append mode reuses the existing header; otherwise start fresh. */
545 header
= perf_header__read(output
);
547 header
= perf_header__new();
549 atexit(atexit_header
);
556 open_counters(profile_cpu
, pid
);
558 if (profile_cpu
!= -1) {
559 open_counters(profile_cpu
, target_pid
);
/* System-wide: one set of counters per online CPU. */
561 for (i
= 0; i
< nr_cpus
; i
++)
562 open_counters(i
, target_pid
);
567 perf_header__write(header
, output
);
570 pid_t tgid
= pid_synthesize_comm_event(pid
, 0);
571 pid_synthesize_mmap_samples(pid
, tgid
);
/* No -p pid given but a command was: fork and exec the workload. */
575 if (target_pid
== -1 && argc
) {
578 perror("failed to fork");
581 if (execvp(argv
[0], (char **)argv
)) {
589 struct sched_param param
;
591 param
.sched_priority
= realtime_prio
;
/*
 * NOTE(review): "¶m" below looks like a mis-encoded "&param"
 * (HTML entity damage in this extraction) — verify against the
 * pristine source before building.
 */
592 if (sched_setscheduler(0, SCHED_FIFO
, ¶m
)) {
593 printf("Could not set realtime priority.\n");
/* Drain every (cpu, counter) ring buffer that we mmap'ed. */
601 for (i
= 0; i
< nr_cpu
; i
++) {
602 for (counter
= 0; counter
< nr_counters
; counter
++)
603 mmap_read(&mmap_array
[i
][counter
]);
/* Nothing new arrived since last pass: block briefly in poll(). */
606 if (hits
== samples
) {
609 ret
= poll(event_array
, nr_poll
, 100);
614 * Approximate RIP event size: 24 bytes.
617 "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
618 (double)bytes_written
/ 1024.0 / 1024.0,
/* Usage strings shown by usage_with_options() / -h. */
625 static const char * const record_usage
[] = {
626 "perf record [<options>] [<command>]",
627 "perf record [<options>] -- <command> [<options>]",
/*
 * Command-line option table for parse_options(); each entry binds a
 * switch to one of the file-scope variables above.
 * NOTE(review): some entries' help strings and the terminating OPT_END()
 * are elided in this excerpt.
 */
631 static const struct option options
[] = {
632 OPT_CALLBACK('e', "event", NULL
, "event",
633 "event selector. use 'perf list' to list available events",
635 OPT_INTEGER('p', "pid", &target_pid
,
636 "record events on existing pid"),
637 OPT_INTEGER('r', "realtime", &realtime_prio
,
638 "collect data with this RT SCHED_FIFO priority"),
639 OPT_BOOLEAN('R', "raw-samples", &raw_samples
,
640 "collect raw sample records from all opened counters"),
641 OPT_BOOLEAN('a', "all-cpus", &system_wide
,
642 "system-wide collection from all CPUs"),
643 OPT_BOOLEAN('A', "append", &append_file
,
644 "append to the output file to do incremental profiling"),
645 OPT_INTEGER('C', "profile_cpu", &profile_cpu
,
646 "CPU to profile on"),
647 OPT_BOOLEAN('f', "force", &force
,
648 "overwrite existing data file"),
649 OPT_LONG('c', "count", &default_interval
,
650 "event period to sample"),
651 OPT_STRING('o', "output", &output_name
, "file",
653 OPT_BOOLEAN('i', "inherit", &inherit
,
654 "child tasks inherit counters"),
655 OPT_INTEGER('F', "freq", &freq
,
656 "profile at this frequency"),
657 OPT_INTEGER('m', "mmap-pages", &mmap_pages
,
658 "number of mmap data pages"),
659 OPT_BOOLEAN('g', "call-graph", &call_graph
,
660 "do call-graph (stack chain/backtrace) recording"),
661 OPT_BOOLEAN('v', "verbose", &verbose
,
662 "be more verbose (show counter open errors, etc)"),
663 OPT_BOOLEAN('s', "stat", &inherit_stat
,
664 "per thread counts"),
665 OPT_BOOLEAN('d', "data", &sample_address
,
667 OPT_BOOLEAN('n', "no-samples", &no_samples
,
/*
 * Entry point for "perf record": parse options, require either a command,
 * -p pid or -a, default the event list to HW cpu-cycles when -e was not
 * given, apply the default sample period to counters without one, then
 * hand off to __cmd_record().
 * NOTE(review): the nr_counters default check and local declarations are
 * elided in this excerpt.
 */
672 int cmd_record(int argc
, const char **argv
, const char *prefix __used
)
676 argc
= parse_options(argc
, argv
, options
, record_usage
,
677 PARSE_OPT_STOP_AT_NON_OPTION
);
/* Nothing to profile: no command, no target pid, not system-wide. */
678 if (!argc
&& target_pid
== -1 && !system_wide
)
679 usage_with_options(record_usage
, options
);
/* Default event when -e was not used: hardware cpu-cycles. */
683 attrs
[0].type
= PERF_TYPE_HARDWARE
;
684 attrs
[0].config
= PERF_COUNT_HW_CPU_CYCLES
;
687 for (counter
= 0; counter
< nr_counters
; counter
++) {
688 if (attrs
[counter
].sample_period
)
/* Only counters without an explicit -c period get the default. */
691 attrs
[counter
].sample_period
= default_interval
;
694 return __cmd_record(argc
, argv
);