/*
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
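/*
 * Note: FD(evsel, cpu, thread) resolves to the file descriptor slot for one
 * (event, mmap'd cpu index, thread) triple, stored in the evsel's fd xyarray.
 */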
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

static u64 user_interval = ULLONG_MAX;
static u64 default_interval = 0;
static u64 sample_type;

static struct cpu_map *cpus;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static unsigned int user_freq = UINT_MAX;
static int freq = 1000;
static int output;
static int pipe_output = 0;
static const char *output_name = "perf.data";
static int group = 0;
static int realtime_prio = 0;
static bool raw_samples = false;
static bool sample_id_all_avail = true;
static bool system_wide = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t *all_tids = NULL;
static int thread_num = 0;
static pid_t child_pid = -1;
static bool no_inherit = false;
static enum write_mode_t write_mode = WRITE_FORCE;
static bool call_graph = false;
static bool inherit_stat = false;
static bool no_samples = false;
static bool sample_address = false;
static bool sample_time = false;
static bool no_buildid = false;
static bool no_buildid_cache = false;

static long samples = 0;
static u64 bytes_written = 0;

static struct pollfd *event_array;

static int nr_poll = 0;
static int nr_cpu = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;
static const char *cpu_list;

struct mmap_data {
	void		*base;
	unsigned int	mask;
	unsigned int	prev;
};

static struct mmap_data mmap_array[MAX_NR_CPUS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	long head;

	head = pc->data_head;
	rmb();

	return head;
}
static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	pc->data_tail = tail;
}
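/*
 * Ring-buffer note: the first mmap'd page is the struct perf_event_mmap_page
 * control page. The kernel advances data_head as it writes samples into the
 * pages that follow; we consume up to data_head and then publish our progress
 * by storing it into data_tail above, so the kernel knows how much of the
 * ring it may safely reuse.
 */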
static void advance_output(size_t size)
{
	bytes_written += size;
}
static void write_output(void *buf, size_t size)
{
	while (size) {
		int ret = write(output, buf, size);

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

		bytes_written += ret;
	}
}
static int process_synthesized_event(event_t *event,
				     struct sample_data *sample __used,
				     struct perf_session *self __used)
{
	write_output(event, event->header.size);
	return 0;
}
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	if (old != head)
		samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(buf, size);
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(buf, size);

	md->prev = old;
	mmap_write_tail(md, old);
}
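/*
 * Illustrative numbers for the wrap-around case handled above: with
 * mmap_pages = 128 and a 4KiB page size, mask = 128*4096 - 1 = 0x7ffff.
 * If old = 0x7fff0 and head = 0x80020, the first write_output() flushes the
 * 0x10 bytes up to the end of the ring and the second flushes the remaining
 * 0x20 bytes starting from &data[0].
 */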
static volatile int done = 0;
static volatile int signr = -1;

static void sig_handler(int sig)
{
	done = 1;
	signr = sig;
}
static void sig_atexit(void)
{
	if (child_pid > 0)
		kill(child_pid, SIGTERM);

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
	struct perf_header_attr *h_attr;

	if (nr < session->header.attrs) {
		h_attr = session->header.attr[nr];
	} else {
		h_attr = perf_header_attr__new(a);
		if (h_attr != NULL)
			if (perf_header__add_attr(&session->header, h_attr) < 0) {
				perf_header_attr__delete(h_attr);
				h_attr = NULL;
			}
	}

	return h_attr;
}
static void create_counter(struct perf_evsel *evsel, int cpu)
{
	char *filter = evsel->filter;
	struct perf_event_attr *attr = &evsel->attr;
	struct perf_header_attr *h_attr;
	int track = !evsel->idx; /* only the first counter needs these */
	int thread_index;
	int ret;
	struct {
		u64 count;
		u64 time_enabled;
		u64 time_running;
		u64 id;
	} read_data;
	/*
	 * Check if parse_single_tracepoint_event has already asked for
	 * PERF_SAMPLE_TIME.
	 *
	 * XXX this is kludgy but short term fix for problems introduced by
	 * eac23d1c that broke 'perf script' by having different sample_types
	 * when using multiple tracepoint events when we use a perf binary
	 * that tries to use sample_id_all on an older kernel.
	 *
	 * We need to move counter creation to perf_session, support
	 * different sample_types, etc.
	 */
	bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
	attr->read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING |
				  PERF_FORMAT_ID;

	attr->sample_type	|= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (nr_counters > 1)
		attr->sample_type |= PERF_SAMPLE_ID;

	/*
	 * We default some events to a default interval of 1. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (user_freq != UINT_MAX &&
				     user_interval != ULLONG_MAX)) {
		if (freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= freq;
		} else {
			attr->sample_period = default_interval;
		}
	}

	if (no_samples)
		attr->sample_freq = 0;

	if (inherit_stat)
		attr->inherit_stat = 1;

	if (sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (system_wide)
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (sample_id_all_avail &&
	    (sample_time || system_wide || !no_inherit || cpu_list))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	attr->mmap		= track;
	attr->comm		= track;
	attr->inherit		= !no_inherit;
	if (target_pid == -1 && target_tid == -1 && !system_wide) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}
retry_sample_id:
	attr->sample_id_all = sample_id_all_avail ? 1 : 0;
	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, all_tids[thread_index], cpu, group_fd, 0);

		if (FD(evsel, nr_cpu, thread_index) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
			else if (err == ENODEV && cpu_list) {
				die("No such device - did you specify"
					" an out-of-range profile CPU?\n");
			} else if (err == EINVAL && sample_id_all_avail) {
				/*
				 * Old kernel, no attr->sample_id_all field
				 */
				sample_id_all_avail = false;
				if (!sample_time && !raw_samples && !time_needed)
					attr->sample_type &= ~PERF_SAMPLE_TIME;

				goto retry_sample_id;
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
			      FD(evsel, nr_cpu, thread_index), strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}

		h_attr = get_header_attr(attr, evsel->idx);
		if (h_attr == NULL)
			die("nomem\n");

		if (!file_new) {
			if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
				fprintf(stderr, "incompatible append\n");
				exit(-1);
			}
		}

		if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
			perror("Unable to read perf file descriptor");
			exit(-1);
		}

		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
			pr_warning("Not enough memory to add id\n");
			exit(-1);
		}

		assert(FD(evsel, nr_cpu, thread_index) >= 0);
		fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK);
		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = FD(evsel, nr_cpu, thread_index);

		if (evsel->idx || thread_index) {
			struct perf_evsel *first;
			first = list_entry(evsel_list.next, struct perf_evsel, node);
			ret = ioctl(FD(evsel, nr_cpu, thread_index),
				    PERF_EVENT_IOC_SET_OUTPUT,
				    FD(first, nr_cpu, 0));
			if (ret) {
				error("failed to set output: %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		} else {
			mmap_array[nr_cpu].prev = 0;
			mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
			mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0);
			if (mmap_array[nr_cpu].base == MAP_FAILED) {
				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
				exit(-1);
			}

			event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index);
			event_array[nr_poll].events = POLLIN;
			nr_poll++;
		}

		if (filter != NULL) {
			ret = ioctl(FD(evsel, nr_cpu, thread_index),
				    PERF_EVENT_IOC_SET_FILTER, filter);
			if (ret) {
				error("failed to set filter with %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		}
	}

	if (!sample_type)
		sample_type = attr->sample_type;
}
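/*
 * Note on the SET_OUTPUT path above: only the first event opened per cpu gets
 * its own mmap'd ring buffer; every other event (and every other thread's fd)
 * is redirected into that buffer with PERF_EVENT_IOC_SET_OUTPUT, so
 * mmap_read() only has one ring per cpu to drain.
 */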
static void open_counters(int cpu)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &evsel_list, node)
		create_counter(pos, cpu);

	nr_cpu++;
}
static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
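/*
 * process_buildids() re-reads everything recorded after
 * post_processing_offset using the build_id__mark_dso_hit_ops handlers, so
 * only the DSOs that actually got samples have their build-ids written into
 * the header when it is finalized below.
 */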
static void atexit_header(void)
{
	if (!pipe_output) {
		session->header.data_size += bytes_written;

		if (!no_buildid)
			process_buildids();
		perf_header__write(&session->header, output, true);
		perf_session__delete(session);
	}
}
static void event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_session *psession = data;

	if (machine__is_host(machine))
		return;

	/*
	 * When processing the record and report subcommands for a guest
	 * kernel, we synthesize the module mmap events before the guest
	 * kernel mmap and trigger a DSO preload, because the default guest
	 * module symbols are loaded from guest kallsyms rather than from
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address falls in a module rather than in the guest kernel itself.
	 */
	err = event__synthesize_modules(process_synthesized_event,
					psession, machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    psession, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    psession, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
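/*
 * PERF_RECORD_FINISHED_ROUND is synthesized by perf record itself, never by
 * the kernel: one is emitted after each pass over all the mmaps (see
 * mmap_read_all() below), and the report side uses it as a barrier up to
 * which queued events can safely be re-sorted by timestamp.
 */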
static void mmap_read_all(void)
{
	int i;

	for (i = 0; i < nr_cpu; i++) {
		if (mmap_array[i].base)
			mmap_read(&mmap_array[i]);
	}

	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
		write_output(&finished_round_event, sizeof(finished_round_event));
}
static int __cmd_record(int argc, const char **argv)
{
	int i;
	struct stat st;
	int flags;
	int err;
	unsigned long waking = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = argc > 0;
	char buf;
	struct machine *machine;

	page_size = sysconf(_SC_PAGE_SIZE);

	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(-1);
	}
	if (!strcmp(output_name, "-"))
		pipe_output = 1;
	else if (!stat(output_name, &st) && st.st_size) {
		if (write_mode == WRITE_FORCE) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	} else if (write_mode == WRITE_APPEND) {
		write_mode = WRITE_FORCE;
	}

	flags = O_CREAT|O_RDWR;
	if (write_mode == WRITE_APPEND)
		file_new = 0;
	else
		flags |= O_TRUNC;

	if (pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}
	session = perf_session__new(output_name, O_WRONLY,
				    write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	if (!no_buildid)
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

	if (!file_new) {
		err = perf_header__read(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (have_tracepoints(&evsel_list))
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);

	/*
	 * perf_session__delete(session) will be called at atexit_header()
	 */
	atexit(atexit_header);
619 perror("failed to fork");
626 close(child_ready_pipe
[0]);
628 fcntl(go_pipe
[0], F_SETFD
, FD_CLOEXEC
);
631 * Do a dummy execvp to get the PLT entry resolved,
632 * so we avoid the resolver overhead on the real
635 execvp("", (char **)argv
);
638 * Tell the parent we're ready to go
640 close(child_ready_pipe
[1]);
643 * Wait until the parent tells us to go.
645 if (read(go_pipe
[0], &buf
, 1) == -1)
646 perror("unable to read pipe");
648 execvp(argv
[0], (char **)argv
);
651 kill(getppid(), SIGUSR1
);
655 if (!system_wide
&& target_tid
== -1 && target_pid
== -1)
656 all_tids
[0] = child_pid
;
658 close(child_ready_pipe
[1]);
661 * wait for child to settle
663 if (read(child_ready_pipe
[0], &buf
, 1) == -1) {
664 perror("unable to read pipe");
667 close(child_ready_pipe
[0]);
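	/*
	 * Synchronization note for the fork above: the child blocks on
	 * go_pipe until the parent has opened all the counters. When
	 * profiling a forked workload (no -p/-t/-a), those counters are
	 * created disabled with enable_on_exec set, so counting effectively
	 * starts at the child's real execvp() and the setup work in between
	 * is not profiled.
	 */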
	if (!system_wide && no_inherit && !cpu_list) {
		open_counters(-1);
	} else {
		for (i = 0; i < cpus->nr; i++)
			open_counters(cpus->map[i]);
	}

	perf_session__set_sample_type(session, sample_type);

	if (pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
		err = perf_header__write(&session->header, output, false);
		if (err < 0)
			return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);

	perf_session__set_sample_id_all(session, sample_id_all_avail);

	if (pipe_output) {
		err = event__synthesize_attrs(&session->header,
					      process_synthesized_event,
					      session);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}

		err = event__synthesize_event_types(process_synthesized_event,
						    session);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}
		if (have_tracepoints(&evsel_list)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = event__synthesize_tracing_data(output, &evsel_list,
							     process_synthesized_event,
							     session);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
			advance_output(err);
		}
	}
	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    session, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    session, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = event__synthesize_modules(process_synthesized_event,
					session, machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session, event__synthesize_guest_os);

	if (!system_wide)
		event__synthesize_thread(target_tid, process_synthesized_event,
					 session);
	else
		event__synthesize_threads(process_synthesized_event, session);
	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			exit(-1);
		}
	}
	/*
	 * Let the child rip
	 */
	if (forks)
		close(go_pipe[1]);

	for (;;) {
		int hits = samples;
		int thread;

		mmap_read_all();

		if (hits == samples) {
			if (done)
				break;
			err = poll(event_array, nr_poll, -1);
			waking++;
		}

		if (done) {
			for (i = 0; i < nr_cpu; i++) {
				struct perf_evsel *pos;

				list_for_each_entry(pos, &evsel_list, node) {
					for (thread = 0;
						thread < thread_num;
						thread++)
						ioctl(FD(pos, i, thread),
							PERF_EVENT_IOC_DISABLE);
				}
			}
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
static bool force, append_file;
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_CALLBACK(0, "filter", NULL, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &append_file,
		    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &force,
		    "overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &user_interval, "event period to sample"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_BOOLEAN('g', "call-graph", &call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
	OPT_BOOLEAN('n', "no-samples", &no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_END()
};
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1 &&
	    !system_wide && !cpu_list)
		usage_with_options(record_usage, record_options);

	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}
	symbol__init();

	if (no_buildid_cache || no_buildid)
		disable_buildid_cache();

	if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(record_usage, record_options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			goto out_symbol_exit;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	cpus = cpu_map__new(cpu_list);
	if (cpus == NULL) {
		perror("failed to parse CPUs map");
		return -1;
	}
	list_for_each_entry(pos, &evsel_list, node) {
		if (perf_evsel__alloc_fd(pos, cpus->nr, thread_num) < 0)
			goto out_free_fd;
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		goto out_free_fd;

	if (user_interval != ULLONG_MAX)
		default_interval = user_interval;
	if (user_freq != UINT_MAX)
		freq = user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_event_array;
	}

	err = __cmd_record(argc, argv);

out_free_event_array: