4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
8 #define _FILE_OFFSET_BITS 64
14 #include "util/build-id.h"
15 #include "util/util.h"
16 #include "util/parse-options.h"
17 #include "util/parse-events.h"
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/tool.h"
26 #include "util/symbol.h"
27 #include "util/cpumap.h"
28 #include "util/thread_map.h"
/*
 * NOTE(review): elided extract — these appear to be the members of a
 * "struct perf_record" whose opening line is not visible here; confirm
 * against the full file.
 */
/* perf_tool callbacks/state handed to the session/event-processing core. */
40 struct perf_tool tool
;
/* Record-time options (-F, -m, -c, ... are parsed into here). */
41 struct perf_record_opts opts
;
/* Output file path; defaulted to "perf.data" in __cmd_record(). */
43 const char *output_name
;
/* The list of events (evsels) being recorded. */
44 struct perf_evlist
*evlist
;
/* Session backing the output file (header, feature bits, fd). */
45 struct perf_session
*session
;
/* Cached sysconf(_SC_PAGE_SIZE); used when draining the mmap rings. */
48 unsigned int page_size
;
/* WRITE_FORCE vs WRITE_APPEND behaviour for an existing output file. */
50 enum write_mode_t write_mode
;
/* When true, do not add build-ids to the local build-id cache. */
52 bool no_buildid_cache
;
/* File offset where the synthesized metadata ends; build-id
 * post-processing (process_buildids) starts scanning from here. */
57 off_t post_processing_offset
;
60 static void advance_output(struct perf_record
*rec
, size_t size
)
62 rec
->bytes_written
+= size
;
65 static void write_output(struct perf_record
*rec
, void *buf
, size_t size
)
68 int ret
= write(rec
->output
, buf
, size
);
71 die("failed to write");
76 rec
->bytes_written
+= ret
;
80 static int process_synthesized_event(struct perf_tool
*tool
,
81 union perf_event
*event
,
82 struct perf_sample
*sample __used
,
83 struct machine
*machine __used
)
85 struct perf_record
*rec
= container_of(tool
, struct perf_record
, tool
);
86 write_output(rec
, event
, event
->header
.size
);
/*
 * Drain one mmap'ed ring buffer into the output file.
 *
 * NOTE(review): elided extract — the second parameter (the mmap,
 * referenced below as "md"), the declarations of "size" and "buf", and
 * the statements computing "size" / advancing "old" are not visible in
 * this view; confirm against the full file before editing.
 */
90 static void perf_record__mmap_read(struct perf_record
*rec
,
/* Kernel's current write position in the ring. */
93 unsigned int head
= perf_mmap__read_head(md
);
/* Our last read position. */
94 unsigned int old
= md
->prev
;
/* Data starts one page past the base — presumably skipping the mmap
 * control page; TODO confirm. */
95 unsigned char *data
= md
->base
+ rec
->page_size
;
/* Unread span wraps around the ring: emit the part up to the end of
 * the buffer first. */
106 if ((old
& md
->mask
) + size
!= (head
& md
->mask
)) {
107 buf
= &data
[old
& md
->mask
];
108 size
= md
->mask
+ 1 - (old
& md
->mask
);
111 write_output(rec
, buf
, size
);
/* Then the remainder from the start of the ring up to head. */
114 buf
= &data
[old
& md
->mask
];
118 write_output(rec
, buf
, size
);
/* Publish the new tail so the kernel can reuse the consumed space. */
121 perf_mmap__write_tail(md
, old
);
/* Volatile flags shared with the async signal handler (sig_handler). */
/* Main-loop exit flag — presumably set by sig_handler(); its body is
 * not visible in this extract. */
124 static volatile int done
= 0;
/* Last signal received; -1 = none.  Checked against SIGUSR1 and
 * re-raised in perf_record__sig_exit(). */
125 static volatile int signr
= -1;
/* Presumably set when SIGCHLD reports the forked workload finished —
 * TODO confirm in sig_handler()'s body. */
126 static volatile int child_finished
= 0;
/*
 * Async signal handler for SIGCHLD/SIGINT/SIGUSR1 (installed in
 * __cmd_record).  NOTE(review): the body is not visible in this
 * extract; presumably it records @sig in the volatile flags defined
 * just above — confirm against the full file.
 */
128 static void sig_handler(int sig
)
/*
 * on_exit() handler: make sure the forked workload is terminated and
 * reaped, then re-raise the fatal signal so our parent sees the real
 * cause of death.  NOTE(review): elided extract — the declaration of
 * "status", the wait() call and several braces are not visible here.
 */
137 static void perf_record__sig_exit(int exit_status __used
, void *arg
)
139 struct perf_record
*rec
= arg
;
/* A workload child was forked: terminate it if still running. */
142 if (rec
->evlist
->workload
.pid
> 0) {
144 kill(rec
->evlist
->workload
.pid
, SIGTERM
);
/* Report when the child itself died from a signal. */
147 if (WIFSIGNALED(status
))
148 psignal(WTERMSIG(status
), rec
->progname
);
/* Clean exits (no signal, or SIGUSR1) need no re-raise. */
151 if (signr
== -1 || signr
== SIGUSR1
)
/* Restore the default disposition and re-deliver the signal to
 * ourselves so the exit status reflects it. */
154 signal(signr
, SIG_DFL
);
155 kill(getpid(), signr
);
158 static bool perf_evlist__equal(struct perf_evlist
*evlist
,
159 struct perf_evlist
*other
)
161 struct perf_evsel
*pos
, *pair
;
163 if (evlist
->nr_entries
!= other
->nr_entries
)
166 pair
= list_entry(other
->entries
.next
, struct perf_evsel
, node
);
168 list_for_each_entry(pos
, &evlist
->entries
, node
) {
169 if (memcmp(&pos
->attr
, &pair
->attr
, sizeof(pos
->attr
) != 0))
171 pair
= list_entry(pair
->node
.next
, struct perf_evsel
, node
);
/*
 * Create (sys_perf_event_open) and mmap the counters for every event
 * in rec->evlist, applying filters and recording the evlist in the
 * session.  NOTE(review): heavily elided extract — the retry label
 * targeted by "goto retry_sample_id", several braces, fallback paths
 * and error-path details are not visible in this view.
 */
177 static void perf_record__open(struct perf_record
*rec
)
179 struct perf_evsel
*pos
, *first
;
180 struct perf_evlist
*evlist
= rec
->evlist
;
181 struct perf_session
*session
= rec
->session
;
182 struct perf_record_opts
*opts
= &rec
->opts
;
/* The first evsel acts as group leader when --group is used. */
184 first
= list_entry(evlist
->entries
.next
, struct perf_evsel
, node
);
186 perf_evlist__config_attrs(evlist
, opts
);
188 list_for_each_entry(pos
, &evlist
->entries
, node
) {
189 struct perf_event_attr
*attr
= &pos
->attr
;
190 struct xyarray
*group_fd
= NULL
;
192 * Check if parse_single_tracepoint_event has already asked for
195 * XXX this is kludgy but short term fix for problems introduced by
196 * eac23d1c that broke 'perf script' by having different sample_types
197 * when using multiple tracepoint events when we use a perf binary
198 * that tries to use sample_id_all on an older kernel.
200 * We need to move counter creation to perf_session, support
201 * different sample_types, etc.
203 bool time_needed
= attr
->sample_type
& PERF_SAMPLE_TIME
;
/* Non-leader group members open against the leader's fds. */
205 if (opts
->group
&& pos
!= first
)
206 group_fd
= first
->fd
;
208 attr
->sample_id_all
= opts
->sample_id_all_avail
? 1 : 0;
210 if (perf_evsel__open(pos
, evlist
->cpus
, evlist
->threads
,
211 opts
->group
, group_fd
) < 0) {
/* Open failed: decode errno into a user-actionable message. */
214 if (err
== EPERM
|| err
== EACCES
) {
215 ui__error_paranoid();
217 } else if (err
== ENODEV
&& opts
->cpu_list
) {
218 die("No such device - did you specify"
219 " an out-of-range profile CPU?\n");
220 } else if (err
== EINVAL
&& opts
->sample_id_all_avail
) {
222 * Old kernel, no attr->sample_id_type_all field
/* Stop asking for sample_id_all and retry the open. */
224 opts
->sample_id_all_avail
= false;
225 if (!opts
->sample_time
&& !opts
->raw_samples
&& !time_needed
)
226 attr
->sample_type
&= ~PERF_SAMPLE_TIME
;
228 goto retry_sample_id
;
232 * If it's cycles then fall back to hrtimer
233 * based cpu-clock-tick sw counter, which
234 * is always available even if no PMU support:
236 if (attr
->type
== PERF_TYPE_HARDWARE
237 && attr
->config
== PERF_COUNT_HW_CPU_CYCLES
) {
240 ui__warning("The cycles event is not supported, "
241 "trying to fall back to cpu-clock-ticks\n");
242 attr
->type
= PERF_TYPE_SOFTWARE
;
243 attr
->config
= PERF_COUNT_SW_CPU_CLOCK
;
248 ui__warning("The %s event is not supported.\n",
254 error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
/* x86-specific hint: no APIC means no hardware sampling interrupt. */
257 #if defined(__i386__) || defined(__x86_64__)
258 if (attr
->type
== PERF_TYPE_HARDWARE
&& err
== EOPNOTSUPP
)
259 die("No hardware sampling interrupt available."
260 " No APIC? If so then you can boot the kernel"
261 " with the \"lapic\" boot parameter to"
262 " force-enable it.\n");
265 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
/* Apply any --filter expressions to the opened events. */
269 if (perf_evlist__set_filters(evlist
)) {
270 error("failed to set filter with %d (%s)\n", errno
,
/* Map the ring buffers for all counters. */
275 if (perf_evlist__mmap(evlist
, opts
->mmap_pages
, false) < 0)
276 die("failed to mmap with %d (%s)\n", errno
, strerror(errno
));
279 session
->evlist
= evlist
;
/* When appending, the existing file's events must match ours. */
281 if (!perf_evlist__equal(session
->evlist
, evlist
)) {
282 fprintf(stderr
, "incompatible append\n");
287 perf_session__update_sample_type(session
);
290 static int process_buildids(struct perf_record
*rec
)
292 u64 size
= lseek(rec
->output
, 0, SEEK_CUR
);
297 rec
->session
->fd
= rec
->output
;
298 return __perf_session__process_events(rec
->session
, rec
->post_processing_offset
,
299 size
- rec
->post_processing_offset
,
300 size
, &build_id__mark_dso_hit_ops
);
/*
 * on_exit() handler: finalize the output file (accumulate the data
 * size, rewrite the header), run build-id post-processing unless
 * disabled, then tear down the session and event list.
 * NOTE(review): elided extract — the trailing arguments to
 * perf_session__write_header() and the closing braces are not visible.
 */
303 static void perf_record__exit(int status __used
, void *arg
)
305 struct perf_record
*rec
= arg
;
/* Only a regular file gets a header fix-up; a pipe cannot be rewound. */
307 if (!rec
->opts
.pipe_output
) {
308 rec
->session
->header
.data_size
+= rec
->bytes_written
;
310 if (!rec
->no_buildid
)
311 process_buildids(rec
);
312 perf_session__write_header(rec
->session
, rec
->evlist
,
314 perf_session__delete(rec
->session
);
315 perf_evlist__delete(rec
->evlist
);
/*
 * Machines-iterator callback (see perf_session__process_machines in
 * __cmd_record): synthesize module and kernel mmap events for one
 * guest machine so guest-kernel samples can be resolved at report
 * time; host machines are skipped.  NOTE(review): elided extract —
 * the trailing arguments of the synthesize calls, error checks and
 * braces are not visible here.
 */
320 static void perf_event__synthesize_guest_os(struct machine
*machine
, void *data
)
323 struct perf_tool
*tool
= data
;
/* Only guests are handled here. */
325 if (machine__is_host(machine
))
329 *As for guest kernel when processing subcommand record&report,
330 *we arrange module mmap prior to guest kernel mmap and trigger
331 *a preload dso because default guest module symbols are loaded
332 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
333 *method is used to avoid symbol missing when the first addr is
334 *in module instead of in guest kernel.
336 err
= perf_event__synthesize_modules(tool
, process_synthesized_event
,
339 pr_err("Couldn't record guest kernel [%d]'s reference"
340 " relocation symbol.\n", machine
->pid
);
343 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
344 * have no _text sometimes.
346 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
349 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
352 pr_err("Couldn't record guest kernel [%d]'s reference"
353 " relocation symbol.\n", machine
->pid
);
/*
 * Header-only PERF_RECORD_FINISHED_ROUND event, written after each
 * flush of all mmap buffers (see perf_record__mmap_read_all) when the
 * session carries tracing data.
 */
356 static struct perf_event_header finished_round_event
= {
357 .size
= sizeof(struct perf_event_header
),
358 .type
= PERF_RECORD_FINISHED_ROUND
,
361 static void perf_record__mmap_read_all(struct perf_record
*rec
)
365 for (i
= 0; i
< rec
->evlist
->nr_mmaps
; i
++) {
366 if (rec
->evlist
->mmap
[i
].base
)
367 perf_record__mmap_read(rec
, &rec
->evlist
->mmap
[i
]);
370 if (perf_header__has_feat(&rec
->session
->header
, HEADER_TRACE_INFO
))
371 write_output(rec
, &finished_round_event
, sizeof(finished_round_event
));
/*
 * The main record driver: install signal/exit handlers, set up the
 * output file and session, synthesize the initial metadata events,
 * enable the counters, then drain the mmap buffers until done.
 * NOTE(review): heavily elided extract — the main while-loop head,
 * many error checks, braces and several call arguments are not
 * visible in this view; confirm against the full file before editing.
 */
374 static int __cmd_record(struct perf_record
*rec
, int argc
, const char **argv
)
379 unsigned long waking
= 0;
380 const bool forks
= argc
> 0;
381 struct machine
*machine
;
382 struct perf_tool
*tool
= &rec
->tool
;
383 struct perf_record_opts
*opts
= &rec
->opts
;
384 struct perf_evlist
*evsel_list
= rec
->evlist
;
385 const char *output_name
= rec
->output_name
;
386 struct perf_session
*session
;
388 rec
->progname
= argv
[0];
390 rec
->page_size
= sysconf(_SC_PAGE_SIZE
);
/* Ensure the workload is reaped and fatal signals re-raised on exit. */
392 on_exit(perf_record__sig_exit
, rec
);
393 signal(SIGCHLD
, sig_handler
);
394 signal(SIGINT
, sig_handler
);
395 signal(SIGUSR1
, sig_handler
);
/* stdout connected to a FIFO implies pipe-output mode. */
398 if (!fstat(STDOUT_FILENO
, &st
) && S_ISFIFO(st
.st_mode
))
399 opts
->pipe_output
= true;
/* Default output file name. */
401 rec
->output_name
= output_name
= "perf.data";
/* "-" means stdout; an existing non-empty file is either rotated to
 * "<name>.old" (force mode) or appended to. */
404 if (!strcmp(output_name
, "-"))
405 opts
->pipe_output
= true;
406 else if (!stat(output_name
, &st
) && st
.st_size
) {
407 if (rec
->write_mode
== WRITE_FORCE
) {
408 char oldname
[PATH_MAX
];
409 snprintf(oldname
, sizeof(oldname
), "%s.old",
412 rename(output_name
, oldname
);
414 } else if (rec
->write_mode
== WRITE_APPEND
) {
/* Appending to a missing/empty file degenerates to force mode. */
415 rec
->write_mode
= WRITE_FORCE
;
419 flags
= O_CREAT
|O_RDWR
;
420 if (rec
->write_mode
== WRITE_APPEND
)
425 if (opts
->pipe_output
)
426 output
= STDOUT_FILENO
;
428 output
= open(output_name
, flags
, S_IRUSR
| S_IWUSR
);
430 perror("failed to create output file");
434 rec
->output
= output
;
436 session
= perf_session__new(output_name
, O_WRONLY
,
437 rec
->write_mode
== WRITE_FORCE
, false, NULL
);
438 if (session
== NULL
) {
439 pr_err("Not enough memory for reading perf file header\n");
443 rec
->session
= session
;
445 if (!rec
->no_buildid
)
446 perf_header__set_feat(&session
->header
, HEADER_BUILD_ID
);
/* Appending: read the existing file's header first. */
448 if (!rec
->file_new
) {
449 err
= perf_session__read_header(session
, output
);
451 goto out_delete_session
;
454 if (have_tracepoints(&evsel_list
->entries
))
455 perf_header__set_feat(&session
->header
, HEADER_TRACE_INFO
);
/* Advertise every header feature this perf can write. */
457 perf_header__set_feat(&session
->header
, HEADER_HOSTNAME
);
458 perf_header__set_feat(&session
->header
, HEADER_OSRELEASE
);
459 perf_header__set_feat(&session
->header
, HEADER_ARCH
);
460 perf_header__set_feat(&session
->header
, HEADER_CPUDESC
);
461 perf_header__set_feat(&session
->header
, HEADER_NRCPUS
);
462 perf_header__set_feat(&session
->header
, HEADER_EVENT_DESC
);
463 perf_header__set_feat(&session
->header
, HEADER_CMDLINE
);
464 perf_header__set_feat(&session
->header
, HEADER_VERSION
);
465 perf_header__set_feat(&session
->header
, HEADER_CPU_TOPOLOGY
);
466 perf_header__set_feat(&session
->header
, HEADER_TOTAL_MEM
);
467 perf_header__set_feat(&session
->header
, HEADER_NUMA_TOPOLOGY
);
468 perf_header__set_feat(&session
->header
, HEADER_CPUID
);
/* Fork (but do not yet start) the workload from the command line. */
471 err
= perf_evlist__prepare_workload(evsel_list
, opts
, argv
);
473 pr_err("Couldn't run the workload!\n");
474 goto out_delete_session
;
478 perf_record__open(rec
);
481 * perf_session__delete(session) will be called at perf_record__exit()
483 on_exit(perf_record__exit
, rec
);
485 if (opts
->pipe_output
) {
486 err
= perf_header__write_pipe(output
);
489 } else if (rec
->file_new
) {
490 err
= perf_session__write_header(session
, evsel_list
,
/* Everything before this offset is metadata, not sample data. */
496 rec
->post_processing_offset
= lseek(output
, 0, SEEK_CUR
);
498 machine
= perf_session__find_host_machine(session
);
500 pr_err("Couldn't find native kernel information.\n");
/* Pipe mode: attrs and event types must be sent in-band. */
504 if (opts
->pipe_output
) {
505 err
= perf_event__synthesize_attrs(tool
, session
,
506 process_synthesized_event
);
508 pr_err("Couldn't synthesize attrs.\n");
512 err
= perf_event__synthesize_event_types(tool
, process_synthesized_event
,
515 pr_err("Couldn't synthesize event_types.\n");
519 if (have_tracepoints(&evsel_list
->entries
)) {
521 * FIXME err <= 0 here actually means that
522 * there were no tracepoints so its not really
523 * an error, just that we don't need to
524 * synthesize anything. We really have to
525 * return this more properly and also
526 * propagate errors that now are calling die()
528 err
= perf_event__synthesize_tracing_data(tool
, output
, evsel_list
,
529 process_synthesized_event
);
531 pr_err("Couldn't record tracing data.\n");
/* Tracing data was written directly to the fd: only account its size. */
534 advance_output(rec
, err
);
/* Kernel mmap + module events so kernel-space samples resolve. */
538 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
541 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
544 pr_err("Couldn't record kernel reference relocation symbol\n"
545 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
546 "Check /proc/kallsyms permission or run as root.\n");
548 err
= perf_event__synthesize_modules(tool
, process_synthesized_event
,
551 pr_err("Couldn't record kernel module information.\n"
552 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
553 "Check /proc/modules permission or run as root.\n");
556 perf_session__process_machines(session
, tool
,
557 perf_event__synthesize_guest_os
);
/* Pre-existing threads: just the target's, or system-wide. */
559 if (!opts
->system_wide
)
560 perf_event__synthesize_thread_map(tool
, evsel_list
->threads
,
561 process_synthesized_event
,
564 perf_event__synthesize_threads(tool
, process_synthesized_event
,
567 if (rec
->realtime_prio
) {
568 struct sched_param param
;
570 param
.sched_priority
= rec
->realtime_prio
;
/* NOTE(review): "¶m" below is a mojibake of "&param" (the HTML
 * entity "&para;" got rendered) — must be fixed for this to compile. */
571 if (sched_setscheduler(0, SCHED_FIFO
, ¶m
)) {
572 pr_err("Could not set realtime priority.\n");
577 perf_evlist__enable(evsel_list
);
583 perf_evlist__start_workload(evsel_list
);
/* Main loop (head elided): drain all buffers, poll when idle. */
586 int hits
= rec
->samples
;
588 perf_record__mmap_read_all(rec
);
/* No new samples arrived: block until an event fd becomes readable. */
590 if (hits
== rec
->samples
) {
593 err
= poll(evsel_list
->pollfd
, evsel_list
->nr_fds
, -1);
598 perf_evlist__disable(evsel_list
);
/* Summary output, suppressed by -q or a SIGUSR1-triggered exit. */
601 if (quiet
|| signr
== SIGUSR1
)
604 fprintf(stderr
, "[ perf record: Woken up %ld times to write data ]\n", waking
);
607 * Approximate RIP event size: 24 bytes.
610 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64
" samples) ]\n",
611 (double)rec
->bytes_written
/ 1024.0 / 1024.0,
613 rec
->bytes_written
/ 24);
618 perf_session__delete(session
);
/* Usage strings shown by parse_options()/usage_with_options().
 * NOTE(review): the NULL terminator and closing brace are elided. */
622 static const char * const record_usage
[] = {
623 "perf record [<options>] [<command>]",
624 "perf record [<options>] -- <command> [<options>]",
629 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
630 * because we need to have access to it in perf_record__exit, that is called
631 * after cmd_record() exits, but since record_options need to be accessible to
632 * builtin-script, leave it here.
634 * At least we don't ouch it in all the other functions here directly.
636 * Just say no to tons of global variables, sigh.
/*
 * The single global perf_record instance (kept global per the XXX note
 * in this file so perf_record__exit can reach it after cmd_record
 * returns).  NOTE(review): other initializers are elided from view.
 */
638 static struct perf_record record
= {
/* UINT_MAX / ULLONG_MAX act as "unset by the user" sentinels,
 * checked against -m/-F/-c in cmd_record(). */
642 .mmap_pages
= UINT_MAX
,
643 .user_freq
= UINT_MAX
,
644 .user_interval
= ULLONG_MAX
,
/* Assume the kernel supports sample_id_all until an open() with
 * EINVAL proves otherwise (see perf_record__open). */
646 .sample_id_all_avail
= true,
648 .write_mode
= WRITE_FORCE
,
653 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
654 * with it and switch to use the library functions in perf_evlist that came
655 * from builtin-record.c, i.e. use perf_record_opts,
656 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
/*
 * Command line options for 'perf record'.  Non-static because
 * builtin-script reuses this table (see the comment above in the
 * file).  NOTE(review): elided extract — a few help strings, the
 * cgroup parse callback and the terminating OPT_END() are not visible.
 */
659 const struct option record_options
[] = {
660 OPT_CALLBACK('e', "event", &record
.evlist
, "event",
661 "event selector. use 'perf list' to list available events",
662 parse_events_option
),
663 OPT_CALLBACK(0, "filter", &record
.evlist
, "filter",
664 "event filter", parse_filter
),
665 OPT_INTEGER('p', "pid", &record
.opts
.target_pid
,
666 "record events on existing process id"),
667 OPT_INTEGER('t', "tid", &record
.opts
.target_tid
,
668 "record events on existing thread id"),
669 OPT_INTEGER('r', "realtime", &record
.realtime_prio
,
670 "collect data with this RT SCHED_FIFO priority"),
671 OPT_BOOLEAN('D', "no-delay", &record
.opts
.no_delay
,
672 "collect data without buffering"),
673 OPT_BOOLEAN('R', "raw-samples", &record
.opts
.raw_samples
,
674 "collect raw sample records from all opened counters"),
675 OPT_BOOLEAN('a', "all-cpus", &record
.opts
.system_wide
,
676 "system-wide collection from all CPUs"),
677 OPT_BOOLEAN('A', "append", &record
.append_file
,
678 "append to the output file to do incremental profiling"),
679 OPT_STRING('C', "cpu", &record
.opts
.cpu_list
, "cpu",
680 "list of cpus to monitor"),
681 OPT_BOOLEAN('f', "force", &record
.force
,
682 "overwrite existing data file (deprecated)"),
683 OPT_U64('c', "count", &record
.opts
.user_interval
, "event period to sample"),
684 OPT_STRING('o', "output", &record
.output_name
, "file",
686 OPT_BOOLEAN('i', "no-inherit", &record
.opts
.no_inherit
,
687 "child tasks do not inherit counters"),
688 OPT_UINTEGER('F', "freq", &record
.opts
.user_freq
, "profile at this frequency"),
689 OPT_UINTEGER('m', "mmap-pages", &record
.opts
.mmap_pages
,
690 "number of mmap data pages"),
691 OPT_BOOLEAN(0, "group", &record
.opts
.group
,
692 "put the counters into a counter group"),
693 OPT_BOOLEAN('g', "call-graph", &record
.opts
.call_graph
,
694 "do call-graph (stack chain/backtrace) recording"),
695 OPT_INCR('v', "verbose", &verbose
,
696 "be more verbose (show counter open errors, etc)"),
697 OPT_BOOLEAN('q', "quiet", &quiet
, "don't print any message"),
698 OPT_BOOLEAN('s', "stat", &record
.opts
.inherit_stat
,
699 "per thread counts"),
700 OPT_BOOLEAN('d', "data", &record
.opts
.sample_address
,
702 OPT_BOOLEAN('T', "timestamp", &record
.opts
.sample_time
, "Sample timestamps"),
703 OPT_BOOLEAN('n', "no-samples", &record
.opts
.no_samples
,
705 OPT_BOOLEAN('N', "no-buildid-cache", &record
.no_buildid_cache
,
706 "do not update the buildid cache"),
707 OPT_BOOLEAN('B', "no-buildid", &record
.no_buildid
,
708 "do not collect buildids in perf.data"),
709 OPT_CALLBACK('G', "cgroup", &record
.evlist
, "name",
710 "monitor event in cgroup name only",
715 int cmd_record(int argc
, const char **argv
, const char *prefix __used
)
718 struct perf_evsel
*pos
;
719 struct perf_evlist
*evsel_list
;
720 struct perf_record
*rec
= &record
;
722 perf_header__set_cmdline(argc
, argv
);
724 evsel_list
= perf_evlist__new(NULL
, NULL
);
725 if (evsel_list
== NULL
)
728 rec
->evlist
= evsel_list
;
730 argc
= parse_options(argc
, argv
, record_options
, record_usage
,
731 PARSE_OPT_STOP_AT_NON_OPTION
);
732 if (!argc
&& rec
->opts
.target_pid
== -1 && rec
->opts
.target_tid
== -1 &&
733 !rec
->opts
.system_wide
&& !rec
->opts
.cpu_list
)
734 usage_with_options(record_usage
, record_options
);
736 if (rec
->force
&& rec
->append_file
) {
737 fprintf(stderr
, "Can't overwrite and append at the same time."
738 " You need to choose between -f and -A");
739 usage_with_options(record_usage
, record_options
);
740 } else if (rec
->append_file
) {
741 rec
->write_mode
= WRITE_APPEND
;
743 rec
->write_mode
= WRITE_FORCE
;
746 if (nr_cgroups
&& !rec
->opts
.system_wide
) {
747 fprintf(stderr
, "cgroup monitoring only available in"
748 " system-wide mode\n");
749 usage_with_options(record_usage
, record_options
);
754 if (symbol_conf
.kptr_restrict
)
756 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
757 "check /proc/sys/kernel/kptr_restrict.\n\n"
758 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
759 "file is not found in the buildid cache or in the vmlinux path.\n\n"
760 "Samples in kernel modules won't be resolved at all.\n\n"
761 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
762 "even with a suitable vmlinux or kallsyms file.\n\n");
764 if (rec
->no_buildid_cache
|| rec
->no_buildid
)
765 disable_buildid_cache();
767 if (evsel_list
->nr_entries
== 0 &&
768 perf_evlist__add_default(evsel_list
) < 0) {
769 pr_err("Not enough memory for event selector list\n");
770 goto out_symbol_exit
;
773 if (rec
->opts
.target_pid
!= -1)
774 rec
->opts
.target_tid
= rec
->opts
.target_pid
;
776 if (perf_evlist__create_maps(evsel_list
, rec
->opts
.target_pid
,
777 rec
->opts
.target_tid
, rec
->opts
.cpu_list
) < 0)
778 usage_with_options(record_usage
, record_options
);
780 list_for_each_entry(pos
, &evsel_list
->entries
, node
) {
781 if (perf_evsel__alloc_fd(pos
, evsel_list
->cpus
->nr
,
782 evsel_list
->threads
->nr
) < 0)
784 if (perf_header__push_event(pos
->attr
.config
, event_name(pos
)))
788 if (perf_evlist__alloc_pollfd(evsel_list
) < 0)
791 if (rec
->opts
.user_interval
!= ULLONG_MAX
)
792 rec
->opts
.default_interval
= rec
->opts
.user_interval
;
793 if (rec
->opts
.user_freq
!= UINT_MAX
)
794 rec
->opts
.freq
= rec
->opts
.user_freq
;
797 * User specified count overrides default frequency.
799 if (rec
->opts
.default_interval
)
801 else if (rec
->opts
.freq
) {
802 rec
->opts
.default_interval
= rec
->opts
.freq
;
804 fprintf(stderr
, "frequency and count are zero, aborting\n");
809 err
= __cmd_record(&record
, argc
, argv
);
811 perf_evlist__delete_maps(evsel_list
);